xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
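
/*
 * Walk the buffer_heads attached to a page and report whether any of them
 * are in the delalloc or unwritten state.
 */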
void
xfs_count_page_state(
	struct page *page,
	int *delalloc,
	int *unwritten)
{
	struct buffer_head *bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
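
/*
 * Return the block device that backs this inode's data: the realtime device
 * for realtime inodes, otherwise the main data device.
 */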
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode *inode)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t *ioend)
{
	struct buffer_head *bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
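
/*
 * Allocate and reserve a transaction up front so the on-disk file size can
 * be updated from the I/O completion handler.
 */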
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend *ioend)
{
	struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans *tp;
	int error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_inode *ip,
	struct xfs_trans *tp,
	xfs_off_t offset,
	size_t size)
{
	xfs_fsize_t isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
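
/*
 * Finish off the append transaction attached to an ioend: re-establish the
 * transaction and freeze protection context, then update the on-disk size,
 * or cancel the transaction if the I/O failed.
 */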
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend *ioend)
{
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	struct xfs_trans *tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (ioend->io_error) {
		xfs_trans_cancel(tp);
		return ioend->io_error;
	}

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend *ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (ioend->io_error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode *inode,
	unsigned int type)
{
	xfs_ioend_t *ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O completion
	 * callback from happening before we have started all the I/O, i.e. it
	 * stops the completion routine from being called too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
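
/*
 * Look up (and, for delalloc writeback, allocate) the extent backing the
 * file range starting at @offset, returning it in @imap.
 */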
STATIC int
xfs_map_blocks(
	struct inode *inode,
	loff_t offset,
	struct xfs_bmbt_irec *imap,
	int type,
	int nonblocking)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	ssize_t count = i_blocksize(inode);
	xfs_fileoff_t offset_fsb, end_fsb;
	int error = 0;
	int bmapi_flags = XFS_BMAPI_ENTIRE;
	int nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -EAGAIN;
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			       imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
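
/*
 * Return true if the cached mapping in @imap still covers the file offset
 * we are about to write.
 */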
STATIC int
xfs_imap_valid(
	struct inode *inode,
	struct xfs_bmbt_irec *imap,
	xfs_off_t offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio *bio)
{
	xfs_ioend_t *ioend = bio->bi_private;

	if (!ioend->io_error)
		ioend->io_error = bio->bi_error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}
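
/*
 * Take an extra reference on the ioend for the bio in flight and submit it,
 * using synchronous writes when the writeback is WB_SYNC_ALL.
 */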
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t *ioend,
	struct bio *bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head *bh)
{
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	ASSERT(bio->bi_private == NULL);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head *bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page *page,
	int clear_dirty,
	int buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);

	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those buffers can occur before we
 * mark the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t *ioend,
	int fail)
{
	xfs_ioend_t *head = ioend;
	xfs_ioend_t *next;
	struct buffer_head *bh;
	struct bio *bio;
	sector_t lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t *ioend)
{
	xfs_ioend_t *next;
	struct buffer_head *bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			/*
			 * The unwritten flag is cleared when added to the
			 * ioend. We're not submitting for I/O so mark the
			 * buffer unwritten again for next time around.
			 */
			if (ioend->io_type == XFS_IO_UNWRITTEN)
				set_buffer_unwritten(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode *inode,
	struct buffer_head *bh,
	xfs_off_t offset,
	unsigned int type,
	xfs_ioend_t **result,
	int need_ioend)
{
	xfs_ioend_t *ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t *previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
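
/*
 * Translate the file offset into an on-disk block number using the extent
 * mapping in @imap and attach it to the buffer_head.
 */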
STATIC void
xfs_map_buffer(
	struct inode *inode,
	struct buffer_head *bh,
	struct xfs_bmbt_irec *imap,
	xfs_off_t offset)
{
	sector_t bn;
	struct xfs_mount *m = XFS_I(inode)->i_mount;
	xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode *inode,
	struct buffer_head *bh,
	struct xfs_bmbt_irec *imap,
	xfs_off_t offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page *page,
	unsigned int type,
	bool check_all_buffers)
{
	struct buffer_head *bh;
	struct buffer_head *head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode *inode,
	struct page *page,
	loff_t tindex,
	struct xfs_bmbt_irec *imap,
	xfs_ioend_t **ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	xfs_off_t end_offset;
	unsigned long p_offset;
	unsigned int type;
	int len, page_dirty;
	int count = 0, done = 0, uptodate = 1;
	xfs_off_t offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	/*
	 * The moment we find a buffer that doesn't match our current type
	 * specification or can't be written, abort the loop and start
	 * writeback. As per the above xfs_imap_valid() check, only
	 * xfs_vm_writepage() can handle partial page writeback fully - we are
	 * limited here to the buffers that are contiguous with the current
	 * ioend, and hence a buffer we can't write breaks that contiguity and
	 * we have to defer the rest of the IO to xfs_vm_writepage().
	 */
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			break;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			/*
			 * imap should always be valid because of the above
			 * partial page end_offset check on the imap.
			 */
			ASSERT(xfs_imap_valid(inode, imap, offset));

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
			break;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode *inode,
	pgoff_t tindex,
	struct xfs_bmbt_irec *imap,
	xfs_ioend_t **ioendp,
	struct writeback_control *wbc,
	pgoff_t tlast)
{
	struct pagevec pvec;
	int done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}
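
/*
 * ->invalidatepage callback: trace the invalidation and let the generic
 * buffer_head code do the actual work.
 */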
STATIC void
xfs_vm_invalidatepage(
	struct page *page,
	unsigned int offset,
	unsigned int length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	struct buffer_head *bh, *head;
	loff_t offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int error;
		xfs_fileoff_t start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page *page,
	struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *bh, *head;
	struct xfs_bmbt_irec imap;
	xfs_ioend_t *ioend = NULL, *iohead = NULL;
	loff_t offset;
	unsigned int type;
	__uint64_t end_offset;
	pgoff_t end_index, last_index;
	ssize_t len;
	int err, imap_valid = 0, uptodate = 1;
	int count = 0;
	int nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;

	/*
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |            file mapping              | <EOF>       |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |         |
	 * ^--------------------------------^----------|---------
	 * |     desired writeback range    | see else |
	 * ---------------------------------^----------|---------
	 */
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |            file mapping                   | <EOF>   |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |                                | Straddles |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, and the user program that performs this
		 * operation will hang.  Instead, we can verify this situation
		 * by checking if the page to write is totally beyond the
		 * i_size or if its offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
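
/*
 * ->writepages callback: clear the truncated flag and hand off to the
 * generic writeback code, which calls back into xfs_vm_writepage() for
 * each dirty page.
 */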
STATIC int
xfs_vm_writepages(
	struct address_space *mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page *page,
	gfp_t gfp_mask)
{
	int delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
/*
 * When we map a DIO buffer, we may need to attach an ioend that describes the
 * type of write IO we are doing. This passes to the completion function the
 * operations it needs to perform. If the mapping is for an overwrite wholly
 * within the EOF then we don't need an ioend and so we don't allocate one.
 * This avoids the unnecessary overhead of allocating and freeing ioends for
 * workloads that don't require transactions on IO completion.
 *
 * If we get multiple mappings in a single IO, we might be mapping different
 * types. But because the direct IO can only have a single private pointer, we
 * need to ensure that:
 *
 * a) i) the ioend spans the entire region of unwritten mappings; or
 *    ii) the ioend spans all the mappings that cross or are beyond EOF; and
 * b) if it contains unwritten extents, it is *permanently* marked as such
 *
 * We could do this by chaining ioends like buffered IO does, but we only
 * actually get one IO completion callback from the direct IO, and that spans
 * the entire IO regardless of how many mappings and IOs are needed to complete
 * the DIO. There is only going to be one reference to the ioend and its life
 * cycle is constrained by the DIO completion code.  Hence we don't need
 * reference counting here.
 *
 * Note that for DIO, an IO to the highest supported file block offset (i.e.
 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
 * bit variable. Hence if we see this overflow, we have to assume that the IO is
 * extending the file size. We won't know for sure until IO completion is run
 * and the actual max write offset is communicated to the IO completion
 * routine.
 *
 * For DAX page faults, we are preparing to never see unwritten extents here,
 * nor should we ever extend the inode size. Hence we will soon have nothing to
 * do here for this case, ensuring we don't have to provide an IO completion
 * callback to free an ioend that we don't actually need for a fault into the
 * page at offset (2^63 - 1FSB) bytes.
 */
static void
xfs_map_direct(
	struct inode *inode,
	struct buffer_head *bh_result,
	struct xfs_bmbt_irec *imap,
	xfs_off_t offset,
	bool dax_fault)
{
	struct xfs_ioend *ioend;
	xfs_off_t size = bh_result->b_size;
	int type;

	if (ISUNWRITTEN(imap))
		type = XFS_IO_UNWRITTEN;
	else
		type = XFS_IO_OVERWRITE;

	trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);

	if (dax_fault) {
		ASSERT(type == XFS_IO_OVERWRITE);
		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
					    imap);
		return;
	}

	if (bh_result->b_private) {
		ioend = bh_result->b_private;
		ASSERT(ioend->io_size > 0);
		ASSERT(offset >= ioend->io_offset);
		if (offset + size > ioend->io_offset + ioend->io_size)
			ioend->io_size = offset - ioend->io_offset + size;

		if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
			ioend->io_type = XFS_IO_UNWRITTEN;

		trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
					      ioend->io_size, ioend->io_type,
					      imap);
	} else if (type == XFS_IO_UNWRITTEN ||
		   offset + size > i_size_read(inode) ||
		   offset + size < 0) {
		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_size = size;

		bh_result->b_private = ioend;
		set_buffer_defer_completion(bh_result);

		trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
					   imap);
	} else {
		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
					    imap);
	}
}
/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the
 * mapping for blocks beyond EOF must be marked new so that sub block regions
 * can be correctly zeroed. We can't do this for mappings within EOF unless
 * the mapping was just allocated or is unwritten, otherwise the callers would
 * overwrite existing data with zeros. Hence we have to split the mapping into
 * a range up to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	struct xfs_bmbt_irec *imap,
	xfs_off_t offset,
	ssize_t size)
{
	xfs_off_t mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
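
/*
 * Common get_blocks implementation for buffered I/O, direct I/O and DAX
 * faults: map (and, if @create is set, allocate) blocks for the range
 * starting at @iblock and fill in @bh_result accordingly.
 */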
  1202. STATIC int
  1203. __xfs_get_blocks(
  1204. struct inode *inode,
  1205. sector_t iblock,
  1206. struct buffer_head *bh_result,
  1207. int create,
  1208. bool direct,
  1209. bool dax_fault)
  1210. {
  1211. struct xfs_inode *ip = XFS_I(inode);
  1212. struct xfs_mount *mp = ip->i_mount;
  1213. xfs_fileoff_t offset_fsb, end_fsb;
  1214. int error = 0;
  1215. int lockmode = 0;
  1216. struct xfs_bmbt_irec imap;
  1217. int nimaps = 1;
  1218. xfs_off_t offset;
  1219. ssize_t size;
  1220. int new = 0;
  1221. if (XFS_FORCED_SHUTDOWN(mp))
  1222. return -EIO;
  1223. offset = (xfs_off_t)iblock << inode->i_blkbits;
  1224. ASSERT(bh_result->b_size >= i_blocksize(inode));
  1225. size = bh_result->b_size;
  1226. if (!create && direct && offset >= i_size_read(inode))
  1227. return 0;
  1228. /*
  1229. * Direct I/O is usually done on preallocated files, so try getting
  1230. * a block mapping without an exclusive lock first. For buffered
  1231. * writes we already have the exclusive iolock anyway, so avoiding
  1232. * a lock roundtrip here by taking the ilock exclusive from the
  1233. * beginning is a useful micro optimization.
  1234. */
  1235. if (create && !direct) {
  1236. lockmode = XFS_ILOCK_EXCL;
  1237. xfs_ilock(ip, lockmode);
  1238. } else {
  1239. lockmode = xfs_ilock_data_map_shared(ip);
  1240. }
  1241. ASSERT(offset <= mp->m_super->s_maxbytes);
  1242. if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
  1243. size = mp->m_super->s_maxbytes - offset;
  1244. end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
  1245. offset_fsb = XFS_B_TO_FSBT(mp, offset);
  1246. error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
  1247. &imap, &nimaps, XFS_BMAPI_ENTIRE);
  1248. if (error)
  1249. goto out_unlock;
  1250. /*
  1251. * The only time we can ever safely find delalloc blocks on direct I/O
  1252. * is a dio write to post-eof speculative preallocation. All other
  1253. * scenarios are indicative of a problem or misuse (such as mixing
  1254. * direct and mapped I/O).
  1255. *
  1256. * The file may be unmapped by the time we get here so we cannot
  1257. * reliably fail the I/O based on mapping. Instead, fail the I/O if this
  1258. * is a read or a write within eof. Otherwise, carry on but warn as a
  1259. * precuation if the file happens to be mapped.
  1260. */
  1261. if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
  1262. if (!create || offset < i_size_read(VFS_I(ip))) {
  1263. WARN_ON_ONCE(1);
  1264. error = -EIO;
  1265. goto out_unlock;
  1266. }
  1267. WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
  1268. }
  1269. /* for DAX, we convert unwritten extents directly */
  1270. if (create &&
  1271. (!nimaps ||
  1272. (imap.br_startblock == HOLESTARTBLOCK ||
  1273. imap.br_startblock == DELAYSTARTBLOCK) ||
  1274. (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
  1275. if (direct || xfs_get_extsz_hint(ip)) {
  1276. /*
  1277. * xfs_iomap_write_direct() expects the shared lock. It
  1278. * is unlocked on return.
  1279. */
  1280. if (lockmode == XFS_ILOCK_EXCL)
  1281. xfs_ilock_demote(ip, lockmode);
  1282. error = xfs_iomap_write_direct(ip, offset, size,
  1283. &imap, nimaps);
  1284. if (error)
  1285. return error;
  1286. new = 1;
  1287. } else {
  1288. /*
  1289. * Delalloc reservations do not require a transaction,
  1290. * we can go on without dropping the lock here. If we
  1291. * are allocating a new delalloc block, make sure that
  1292. * we set the new flag so that we mark the buffer new so
  1293. * that we know that it is newly allocated if the write
  1294. * fails.
  1295. */
  1296. if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
  1297. new = 1;
  1298. error = xfs_iomap_write_delay(ip, offset, size, &imap);
  1299. if (error)
  1300. goto out_unlock;
  1301. xfs_iunlock(ip, lockmode);
  1302. }
  1303. trace_xfs_get_blocks_alloc(ip, offset, size,
  1304. ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
  1305. : XFS_IO_DELALLOC, &imap);
  1306. } else if (nimaps) {
  1307. trace_xfs_get_blocks_found(ip, offset, size,
  1308. ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
  1309. : XFS_IO_OVERWRITE, &imap);
  1310. xfs_iunlock(ip, lockmode);
  1311. } else {
  1312. trace_xfs_get_blocks_notfound(ip, offset, size);
  1313. goto out_unlock;
  1314. }
  1315. if (IS_DAX(inode) && create) {
  1316. ASSERT(!ISUNWRITTEN(&imap));
  1317. /* zeroing is not needed at a higher layer */
  1318. new = 0;
  1319. }
  1320. /* trim mapping down to size requested */
  1321. if (direct || size > (1 << inode->i_blkbits))
  1322. xfs_map_trim_size(inode, iblock, bh_result,
  1323. &imap, offset, size);
  1324. /*
  1325. * For unwritten extents do not report a disk address in the buffered
  1326. * read case (treat as if we're reading into a hole).
  1327. */
  1328. if (imap.br_startblock != HOLESTARTBLOCK &&
  1329. imap.br_startblock != DELAYSTARTBLOCK &&
  1330. (create || !ISUNWRITTEN(&imap))) {
  1331. xfs_map_buffer(inode, bh_result, &imap, offset);
  1332. if (ISUNWRITTEN(&imap))
  1333. set_buffer_unwritten(bh_result);
  1334. /* direct IO needs special help */
  1335. if (create && direct)
  1336. xfs_map_direct(inode, bh_result, &imap, offset,
  1337. dax_fault);
  1338. }
  1339. /*
  1340. * If this is a realtime file, data may be on a different device.
  1341. * to that pointed to from the buffer_head b_bdev currently.
  1342. */
  1343. bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
  1344. /*
  1345. * If we previously allocated a block out beyond eof and we are now
  1346. * coming back to use it then we will need to flag it as new even if it
  1347. * has a disk address.
  1348. *
  1349. * With sub-block writes into unwritten extents we also need to mark
  1350. * the buffer as new so that the unwritten parts of the buffer gets
  1351. * correctly zeroed.
  1352. */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

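/*
 * __xfs_get_blocks() does all the work; the three wrappers below differ only
 * in the direct and dax_fault flags they pass for the buffered, direct I/O
 * and DAX fault paths respectively.
 */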
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
}

int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}

int
xfs_get_blocks_dax_fault(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
}

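/*
 * Common direct I/O write completion: copy the IO offset/size into the ioend,
 * update the in-core inode size if this IO extends it, reserve an on-disk
 * size update transaction if needed, then run the normal ioend completion
 * processing.
 */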
static void
__xfs_end_io_direct_write(
	struct inode		*inode,
	struct xfs_ioend	*ioend,
	loff_t			offset,
	ssize_t			size)
{
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)
		goto out_end_io;

	/*
	 * dio completion end_io functions are only called on writes if more
	 * than 0 bytes were written.
	 */
	ASSERT(size > 0);

	/*
	 * The ioend only maps whole blocks, while the IO may be sector aligned.
	 * Hence the ioend offset/size may not match the IO offset/size exactly.
	 * Because we don't map overwrites within EOF into the ioend, the offset
	 * may not match, but only if the endio spans EOF. Either way, write
	 * the IO sizes into the ioend so that completion processing does the
	 * right thing.
	 */
	ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
	ioend->io_size = size;
	ioend->io_offset = offset;

	/*
	 * The ioend tells us whether we are doing unwritten extent conversion
	 * or an append transaction that updates the on-disk file size. These
	 * cases are the only cases where we should *potentially* be needing
	 * to update the VFS inode size.
	 *
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&XFS_I(inode)->i_flags_lock);
	if (offset + size > i_size_read(inode))
		i_size_write(inode, offset + size);
	spin_unlock(&XFS_I(inode)->i_flags_lock);

	/*
	 * If we are doing an append IO that needs to update the EOF on disk,
	 * do the transaction reserve now so we can use common end io
	 * processing. Stashing the error (if there is one) in the ioend lets
	 * the ioend processing pass the error on, since we can't return it
	 * from here.
	 */
	if (ioend->io_type == XFS_IO_OVERWRITE)
		ioend->io_error = xfs_setfilesize_trans_alloc(ioend);

out_end_io:
	xfs_end_io(&ioend->io_work);
	return;
}

/*
 * Complete a direct I/O write request.
 *
 * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
 * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
 * wholly within the EOF and so there is nothing for us to do. Note that in this
 * case the completion can be called in interrupt context, whereas if we have an
 * ioend we will always be called in task context (i.e. from a workqueue).
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_ioend	*ioend = private;

	trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
				     ioend ? ioend->io_type : 0, NULL);

	if (!ioend) {
		ASSERT(offset + size <= i_size_read(inode));
		return;
	}

	__xfs_end_io_direct_write(inode, ioend, offset, size);
}

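/*
 * DAX inodes go straight through dax_do_io(); everything else is submitted
 * through __blockdev_direct_IO() against the inode's block device.
 */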
static inline ssize_t
xfs_vm_do_dio(
	struct inode		*inode,
	struct kiocb		*iocb,
	struct iov_iter		*iter,
	loff_t			offset,
	void			(*endio)(struct kiocb	*iocb,
					 loff_t		offset,
					 ssize_t	size,
					 void		*private),
	int			flags)
{
	struct block_device	*bdev;

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset,
				 xfs_get_blocks_direct, endio, 0);

	bdev = xfs_find_bdev_for_inode(inode);
	return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
				    xfs_get_blocks_direct, endio, NULL, flags);
}

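/*
 * Direct I/O writes need the completion handler above so that file size
 * updates and unwritten extent conversion happen at the end of the IO;
 * direct I/O reads complete with no endio callback at all.
 */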
STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter,
	loff_t			offset)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;

	if (iov_iter_rw(iter) == WRITE)
		return xfs_vm_do_dio(inode, iocb, iter, offset,
				     xfs_end_io_direct_write, DIO_ASYNC_EXTEND);
	return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

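/*
 * A buffered write failed part way through: walk the buffers covering the
 * failed range and punch out delalloc blocks that this write newly allocated
 * or that lie beyond EOF, then clear the buffer state so nobody mistakes
 * them for valid data.
 */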
STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on 64-bit platforms. However, for a 64-bit pos request on a 32-bit
	 * platform, the high 32 bits will be masked off if we evaluate the
	 * block_offset via (pos & PAGE_MASK), because PAGE_MASK is only
	 * 0xfffff000 as an unsigned long there. The result is then incorrect
	 * and could cause the following ASSERT to fail in most cases.
	 * To avoid this, evaluate the block_offset of the start of the page
	 * by using shifts rather than masks.
	 */
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);

		/*
		 * This buffer does not contain data anymore. Make sure anyone
		 * who finds it knows that for certain.
		 */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks
 * out on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed()
 * at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		/*
		 * If the write is beyond EOF, we only want to kill blocks
		 * allocated in this write, not blocks that were previously
		 * written successfully.
		 */
		if (pos + len > isize) {
			ssize_t start = max_t(ssize_t, pos, isize);

			truncate_pagecache_range(inode, start, pos + len);
		}

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 * this specific write because they will never be written. Previous writes
 * beyond EOF where block allocation succeeded do not need to be trashed, so
 * only new blocks from this write should be trashed. For blocks within
 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 * written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			/* only kill blocks in this write beyond EOF */
			if (pos > isize)
				isize = pos;
			xfs_vm_kill_delalloc_range(inode, isize, to);
			truncate_pagecache_range(inode, isize, to);
		}
	}
	return ret;
}

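/*
 * Write back and wait on dirty data under the shared iolock first so that
 * delalloc extents are converted to real ones and generic_block_bmap()
 * reports real disk addresses.
 */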
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

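/*
 * Buffered reads go through the generic mpage paths, with xfs_get_blocks()
 * supplying the block mapping.
 */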
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;
	struct mem_cgroup	*memcg;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized with
	 * per-memcg dirty page counters.
	 */
	memcg = mem_cgroup_begin_page_stat(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping, memcg);
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page),
					   PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	mem_cgroup_end_page_stat(memcg);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};