addr.c
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>    /* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>
/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped), at which point we are writing the most
 * recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
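
/*
 * An illustrative example of the accounting above (hypothetical
 * numbers): an inode with three dirty pages and no snapshots has
 * i_wrbuffer_ref == i_wrbuffer_ref_head == 3.  When a snapshot is
 * taken, a ceph_cap_snap is appended to i_cap_snaps with
 * capsnap->dirty = 3, i_wrbuffer_ref_head drops to 0, and
 * i_wrbuffer_ref stays at 3.  Dirtying two more pages afterwards gives
 * i_wrbuffer_ref == 5, i_wrbuffer_ref_head == 2, and writeback must
 * flush the three capsnap pages before either of the "head" pages.
 */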
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)                            \
        (CONGESTION_ON_THRESH(congestion_kb) -                          \
         (CONGESTION_ON_THRESH(congestion_kb) >> 2))
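
/*
 * Note on the thresholds above: congestion_kb is in kilobytes, so
 * shifting right by (PAGE_SHIFT - 10) converts it to a page count, and
 * the "off" threshold backs off to 3/4 of the "on" threshold.  E.g.,
 * with 4 KB pages (PAGE_SHIFT == 12), congestion_kb = 8192 gives an
 * on-threshold of 2048 pages and an off-threshold of 1536 pages.
 */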
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
        if (PagePrivate(page))
                return (void *)page->private;
        return NULL;
}
/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_snap_context *snapc;
        int ret;

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        if (PageDirty(page)) {
                dout("%p set_page_dirty %p idx %lu -- already dirty\n",
                     mapping->host, page, page->index);
                BUG_ON(!PagePrivate(page));
                return 0;
        }

        inode = mapping->host;
        ci = ceph_inode(inode);

        /* dirty the head */
        spin_lock(&ci->i_ceph_lock);
        BUG_ON(ci->i_wr_ref == 0); /* caller should hold Fw reference */
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
                capsnap->dirty_pages++;
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
                ++ci->i_wrbuffer_ref_head;
        }
        if (ci->i_wrbuffer_ref == 0)
                ihold(inode);
        ++ci->i_wrbuffer_ref;
        dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
             "snapc %p seq %lld (%d snaps)\n",
             mapping->host, page, page->index,
             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
             snapc, snapc->seq, snapc->num_snaps);
        spin_unlock(&ci->i_ceph_lock);

        /*
         * Reference snap context in page->private.  Also set
         * PagePrivate so that we get invalidatepage callback.
         */
        BUG_ON(PagePrivate(page));
        page->private = (unsigned long)snapc;
        SetPagePrivate(page);

        ret = __set_page_dirty_nobuffers(page);
        WARN_ON(!PageLocked(page));
        WARN_ON(!page->mapping);

        return ret;
}
/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_snap_context *snapc = page_snap_context(page);

        inode = page->mapping->host;
        ci = ceph_inode(inode);

        if (offset != 0 || length != PAGE_CACHE_SIZE) {
                dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
                     inode, page, page->index, offset, length);
                return;
        }

        ceph_invalidate_fscache_page(inode, page);

        if (!PagePrivate(page))
                return;

        /*
         * We can get non-dirty pages here due to races between
         * set_page_dirty and truncate_complete_page; just spit out a
         * warning, in case we end up with accounting problems later.
         */
        if (!PageDirty(page))
                pr_err("%p invalidatepage %p page not dirty\n", inode, page);

        ClearPageChecked(page);

        dout("%p invalidatepage %p idx %lu full dirty page\n",
             inode, page, page->index);
        ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
        ceph_put_snap_context(snapc);
        page->private = 0;
        ClearPagePrivate(page);
}
static int ceph_releasepage(struct page *page, gfp_t g)
{
        struct inode *inode = page->mapping ? page->mapping->host : NULL;

        dout("%p releasepage %p idx %lu\n", inode, page, page->index);
        WARN_ON(PageDirty(page));

        /* Can we release the page from the cache? */
        if (!ceph_release_fscache_page(page, g))
                return 0;

        return !PagePrivate(page);
}
/*
 * read a single page, without unlocking it.
 */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
        int err = 0;
        u64 off = page_offset(page);
        u64 len = PAGE_CACHE_SIZE;

        if (off >= i_size_read(inode)) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                /*
                 * Uptodate inline data should have been added
                 * into page cache while getting Fcr caps.
                 */
                if (off == 0)
                        return -EINVAL;
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        err = ceph_readpage_from_fscache(inode, page);
        if (err == 0)
                return -EINPROGRESS;

        dout("readpage inode %p file %p page %p index %lu\n",
             inode, filp, page, page->index);
        err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
                                  off, &len,
                                  ci->i_truncate_seq, ci->i_truncate_size,
                                  &page, 1, 0);
        if (err == -ENOENT)
                err = 0;
        if (err < 0) {
                SetPageError(page);
                ceph_fscache_readpage_cancel(inode, page);
                goto out;
        }
        if (err < PAGE_CACHE_SIZE)
                /* zero fill remainder of page */
                zero_user_segment(page, err, PAGE_CACHE_SIZE);
        else
                flush_dcache_page(page);

        SetPageUptodate(page);
        ceph_readpage_to_fscache(inode, page);

out:
        return err < 0 ? err : 0;
}
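
/*
 * Note the unlock protocol: ceph_do_readpage() returns -EINPROGRESS
 * when fscache has taken ownership of the read, in which case the page
 * is unlocked asynchronously by the fscache completion callback rather
 * than here.
 */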
static int ceph_readpage(struct file *filp, struct page *page)
{
        int r = ceph_do_readpage(filp, page);
        if (r != -EINPROGRESS)
                unlock_page(page);
        else
                r = 0;
        return r;
}
/*
 * Finish an async read(ahead) op.
 */
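/*
 * "bytes" counts down the data actually returned by the osd as we walk
 * the page vector; any page (or tail of a page) beyond the reply data
 * is zero-filled so it can still be marked uptodate.
 */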
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
        struct inode *inode = req->r_inode;
        struct ceph_osd_data *osd_data;
        int rc = req->r_result;
        int bytes = le32_to_cpu(msg->hdr.data_len);
        int num_pages;
        int i;

        dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

        /* unlock all pages, zeroing any data we didn't read */
        osd_data = osd_req_op_extent_osd_data(req, 0);
        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
        num_pages = calc_pages_for((u64)osd_data->alignment,
                                   (u64)osd_data->length);
        for (i = 0; i < num_pages; i++) {
                struct page *page = osd_data->pages[i];

                /* -ENOENT means a hole/nonexistent object: treat the
                 * pages as zero-filled rather than as an error.  (rc is
                 * negative here, so compare against -ENOENT, not the
                 * positive errno.) */
                if (rc < 0 && rc != -ENOENT)
                        goto unlock;
                if (bytes < (int)PAGE_CACHE_SIZE) {
                        /* zero (remainder of) page */
                        int s = bytes < 0 ? 0 : bytes;
                        zero_user_segment(page, s, PAGE_CACHE_SIZE);
                }
                dout("finish_read %p uptodate %p idx %lu\n", inode, page,
                     page->index);
                flush_dcache_page(page);
                SetPageUptodate(page);
                ceph_readpage_to_fscache(inode, page);
unlock:
                unlock_page(page);
                page_cache_release(page);
                bytes -= PAGE_CACHE_SIZE;
        }
        kfree(osd_data->pages);
}
static void ceph_unlock_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                unlock_page(pages[i]);
}
/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
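/*
 * The OSD request describes a single contiguous byte range, so only an
 * initial run of consecutive pages from page_list can be submitted per
 * call; ceph_readpages() keeps calling us until the list is drained.
 */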
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *page = list_entry(page_list->prev, struct page, lru);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        u64 off;
        u64 len;
        int i;
        struct page **pages;
        pgoff_t next_index;
        int nr_pages = 0;
        int ret;

        off = (u64) page_offset(page);

        /* count pages */
        next_index = page->index;
        list_for_each_entry_reverse(page, page_list, lru) {
                if (page->index != next_index)
                        break;
                nr_pages++;
                next_index++;
                if (max && nr_pages == max)
                        break;
        }
        len = nr_pages << PAGE_CACHE_SHIFT;
        dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
             off, len);
        vino = ceph_vino(inode);
        req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
                                    0, 1, CEPH_OSD_OP_READ,
                                    CEPH_OSD_FLAG_READ, NULL,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* build page vector */
        nr_pages = calc_pages_for(0, len);
        pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
        ret = -ENOMEM;
        if (!pages)
                goto out;
        for (i = 0; i < nr_pages; ++i) {
                page = list_entry(page_list->prev, struct page, lru);
                BUG_ON(PageLocked(page));
                list_del(&page->lru);

                dout("start_read %p adding %p idx %lu\n", inode, page,
                     page->index);
                if (add_to_page_cache_lru(page, &inode->i_data, page->index,
                                          GFP_KERNEL)) {
                        ceph_fscache_uncache_page(inode, page);
                        page_cache_release(page);
                        dout("start_read %p add_to_page_cache failed %p\n",
                             inode, page);
                        nr_pages = i;
                        goto out_pages;
                }
                pages[i] = page;
        }
        osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
        req->r_callback = finish_read;
        req->r_inode = inode;

        ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

        dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
        ret = ceph_osdc_start_request(osdc, req, false);
        if (ret < 0)
                goto out_pages;
        ceph_osdc_put_request(req);
        return nr_pages;

out_pages:
        ceph_unlock_page_vector(pages, nr_pages);
        ceph_release_page_vector(pages, nr_pages);
out:
        ceph_osdc_put_request(req);
        return ret;
}
/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *page_list, unsigned nr_pages)
{
        struct inode *inode = file_inode(file);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        int rc = 0;
        int max = 0;

        if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
                return -EINVAL;

        rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
                                         &nr_pages);
        if (rc == 0)
                goto out;

        if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
                max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
                        >> PAGE_SHIFT;
        dout("readpages %p file %p nr_pages %d max %d\n", inode,
             file, nr_pages, max);
        while (!list_empty(page_list)) {
                rc = start_read(inode, page_list, max);
                if (rc < 0)
                        goto out;
                BUG_ON(rc == 0);
        }
out:
        ceph_fscache_readpages_cancel(inode, page_list);

        dout("readpages %p file %p ret %d\n", inode, file, rc);
        return rc;
}
/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                                                    loff_t *snap_size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;

        spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
                if (capsnap->dirty_pages) {
                        snapc = ceph_get_snap_context(capsnap->context);
                        if (snap_size)
                                *snap_size = capsnap->size;
                        break;
                }
        }
        if (!snapc && ci->i_wrbuffer_ref_head) {
                snapc = ceph_get_snap_context(ci->i_head_snapc);
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
        }
        spin_unlock(&ci->i_ceph_lock);
        return snapc;
}
/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_fs_client *fsc;
        struct ceph_osd_client *osdc;
        struct ceph_snap_context *snapc, *oldest;
        loff_t page_off = page_offset(page);
        loff_t snap_size = -1;
        long writeback_stat;
        u64 truncate_size;
        u32 truncate_seq;
        int err = 0, len = PAGE_CACHE_SIZE;

        dout("writepage %p idx %lu\n", page, page->index);

        if (!page->mapping || !page->mapping->host) {
                dout("writepage %p - no mapping\n", page);
                return -EFAULT;
        }
        inode = page->mapping->host;
        ci = ceph_inode(inode);
        fsc = ceph_inode_to_client(inode);
        osdc = &fsc->client->osdc;

        /* verify this is a writeable snap context */
        snapc = page_snap_context(page);
        if (snapc == NULL) {
                dout("writepage %p page %p not dirty?\n", inode, page);
                goto out;
        }
        oldest = get_oldest_context(inode, &snap_size);
        if (snapc->seq > oldest->seq) {
                dout("writepage %p page %p snapc %p not writeable - noop\n",
                     inode, page, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON((current->flags & PF_MEMALLOC) == 0);
                ceph_put_snap_context(oldest);
                goto out;
        }
        ceph_put_snap_context(oldest);

        spin_lock(&ci->i_ceph_lock);
        truncate_seq = ci->i_truncate_seq;
        truncate_size = ci->i_truncate_size;
        if (snap_size == -1)
                snap_size = i_size_read(inode);
        spin_unlock(&ci->i_ceph_lock);

        /* is this a partial page at end of file? */
        if (page_off >= snap_size) {
                dout("%p page eof %llu\n", page, snap_size);
                goto out;
        }
        if (snap_size < page_off + len)
                len = snap_size - page_off;

        dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
             inode, page, page->index, page_off, len, snapc);

        writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
        if (writeback_stat >
            CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
                set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

        ceph_readpage_to_fscache(inode, page);

        set_page_writeback(page);
        err = ceph_osdc_writepages(osdc, ceph_vino(inode),
                                   &ci->i_layout, snapc,
                                   page_off, len,
                                   truncate_seq, truncate_size,
                                   &inode->i_mtime, &page, 1);
        if (err < 0) {
                dout("writepage setting page/mapping error %d %p\n",
                     err, page);
                SetPageError(page);
                mapping_set_error(&inode->i_data, err);
                if (wbc)
                        wbc->pages_skipped++;
        } else {
                dout("writepage cleaned page %p\n", page);
                err = 0;  /* vfs expects us to return 0 */
        }
        page->private = 0;
        ClearPagePrivate(page);
        end_page_writeback(page);
        ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
        ceph_put_snap_context(snapc);   /* page's reference */
out:
        return err;
}
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
        int err;
        struct inode *inode = page->mapping->host;
        BUG_ON(!inode);
        ihold(inode);
        err = writepage_nounlock(page, wbc);
        unlock_page(page);
        iput(inode);
        return err;
}
/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);
        for (i = 0; i < num; i++) {
                if (pagevec_add(&pvec, pages[i]) == 0)
                        pagevec_release(&pvec);
        }
        pagevec_release(&pvec);
}
/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
                              struct ceph_msg *msg)
{
        struct inode *inode = req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_data *osd_data;
        unsigned wrote;
        struct page *page;
        int num_pages;
        int i;
        struct ceph_snap_context *snapc = req->r_snapc;
        struct address_space *mapping = inode->i_mapping;
        int rc = req->r_result;
        u64 bytes = req->r_ops[0].extent.length;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        long writeback_stat;
        unsigned issued = ceph_caps_issued(ci);

        osd_data = osd_req_op_extent_osd_data(req, 0);
        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
        num_pages = calc_pages_for((u64)osd_data->alignment,
                                   (u64)osd_data->length);
        if (rc >= 0) {
                /*
                 * Assume we wrote the pages we originally sent.  The
                 * osd might reply with fewer pages if our writeback
                 * raced with a truncation and was adjusted at the osd,
                 * so don't believe the reply.
                 */
                wrote = num_pages;
        } else {
                wrote = 0;
                mapping_set_error(mapping, rc);
        }
        dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
             inode, rc, bytes, wrote);

        /* clean all pages */
        for (i = 0; i < num_pages; i++) {
                page = osd_data->pages[i];
                BUG_ON(!page);
                WARN_ON(!PageUptodate(page));

                writeback_stat =
                        atomic_long_dec_return(&fsc->writeback_count);
                if (writeback_stat <
                    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
                        clear_bdi_congested(&fsc->backing_dev_info,
                                            BLK_RW_ASYNC);

                ceph_put_snap_context(page_snap_context(page));
                page->private = 0;
                ClearPagePrivate(page);
                dout("unlocking %d %p\n", i, page);
                end_page_writeback(page);

                /*
                 * We lost the cache cap, need to truncate the page before
                 * it is unlocked, otherwise we'd truncate it later in the
                 * page truncation thread, possibly losing some data that
                 * raced its way in
                 */
                if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
                        generic_error_remove_page(inode->i_mapping, page);

                unlock_page(page);
        }
        dout("%p wrote+cleaned %d pages\n", inode, wrote);
        ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc);

        ceph_release_pages(osd_data->pages, num_pages);
        if (osd_data->pages_from_pool)
                mempool_free(osd_data->pages,
                             ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
        else
                kfree(osd_data->pages);
        ceph_osdc_put_request(req);
}
/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino = ceph_vino(inode);
        pgoff_t index, start, end;
        int range_whole = 0;
        int should_loop = 1;
        pgoff_t max_pages = 0, max_pages_ever = 0;
        struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
        struct pagevec pvec;
        int done = 0;
        int rc = 0;
        unsigned int wsize = i_blocksize(inode);
        struct ceph_osd_request *req = NULL;
        int do_sync = 0;
        loff_t snap_size, i_size;
        u64 truncate_size;
        u32 truncate_seq;

        /*
         * Include a 'sync' in the OSD request if this is a data
         * integrity write (e.g., O_SYNC write or fsync()), or if our
         * cap is being revoked.
         */
        if ((wbc->sync_mode == WB_SYNC_ALL) ||
            ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
                do_sync = 1;
        dout("writepages_start %p dosync=%d (mode=%s)\n",
             inode, do_sync,
             wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
             (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

        if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
                pr_warn("writepage_start %p on forced umount\n", inode);
                truncate_pagecache(inode, 0);
                mapping_set_error(mapping, -EIO);
                return -EIO; /* we're in a forced umount, don't write! */
        }
        if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
                wsize = fsc->mount_options->wsize;
        if (wsize < PAGE_CACHE_SIZE)
                wsize = PAGE_CACHE_SIZE;
        max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);

        /* where to start/end? */
        if (wbc->range_cyclic) {
                start = mapping->writeback_index; /* Start from prev offset */
                end = -1;
                dout(" cyclic, start at %lu\n", start);
        } else {
                start = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                should_loop = 0;
                dout(" not cyclic, %lu to %lu\n", start, end);
        }
        index = start;
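
        /*
         * Outline of the loop below: pick the oldest snap context that
         * still has dirty data (writes must go out in snap order), scan
         * the mapping for dirty-tagged pages, skip any page whose snapc
         * is newer than the one being flushed, gather a run of
         * consecutive pages into a single OSD write of at most wsize
         * bytes, and repeat until the range (or nr_to_write) is
         * exhausted.
         */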
retry:
        /* find oldest snap context with dirty data */
        ceph_put_snap_context(snapc);
        snap_size = -1;
        snapc = get_oldest_context(inode, &snap_size);
        if (!snapc) {
                /* hmm, why does writepages get called when there
                   is no dirty data? */
                dout(" no snap context with dirty data?\n");
                goto out;
        }
        dout(" oldest snapc is %p seq %lld (%d snaps)\n",
             snapc, snapc->seq, snapc->num_snaps);

        spin_lock(&ci->i_ceph_lock);
        truncate_seq = ci->i_truncate_seq;
        truncate_size = ci->i_truncate_size;
        i_size = i_size_read(inode);
        spin_unlock(&ci->i_ceph_lock);

        if (last_snapc && snapc != last_snapc) {
                /* if we switched to a newer snapc, restart our scan at the
                 * start of the original file range. */
                dout(" snapc differs from last pass, restarting at %lu\n",
                     index);
                index = start;
        }
        last_snapc = snapc;

        while (!done && index <= end) {
                unsigned i;
                int first;
                pgoff_t next;
                int pvec_pages, locked_pages;
                struct page **pages = NULL;
                mempool_t *pool = NULL; /* Becomes non-null if mempool used */
                struct page *page;
                int want;
                u64 offset, len;
                long writeback_stat;

                next = 0;
                locked_pages = 0;
                max_pages = max_pages_ever;

get_more_pages:
                first = -1;
                want = min(end - index,
                           min((pgoff_t)PAGEVEC_SIZE,
                               max_pages - (pgoff_t)locked_pages) - 1)
                        + 1;
                pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                                PAGECACHE_TAG_DIRTY,
                                                want);
                dout("pagevec_lookup_tag got %d\n", pvec_pages);
                if (!pvec_pages && !locked_pages)
                        break;
                for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
                        page = pvec.pages[i];
                        dout("? %p idx %lu\n", page, page->index);
                        if (locked_pages == 0)
                                lock_page(page);  /* first page */
                        else if (!trylock_page(page))
                                break;

                        /* only dirty pages, or our accounting breaks */
                        if (unlikely(!PageDirty(page)) ||
                            unlikely(page->mapping != mapping)) {
                                dout("!dirty or !mapping %p\n", page);
                                unlock_page(page);
                                break;
                        }
                        if (!wbc->range_cyclic && page->index > end) {
                                dout("end of range %p\n", page);
                                done = 1;
                                unlock_page(page);
                                break;
                        }
                        if (next && (page->index != next)) {
                                dout("not consecutive %p\n", page);
                                unlock_page(page);
                                break;
                        }
                        if (wbc->sync_mode != WB_SYNC_NONE) {
                                dout("waiting on writeback %p\n", page);
                                wait_on_page_writeback(page);
                        }
                        if (page_offset(page) >=
                            (snap_size == -1 ? i_size : snap_size)) {
                                dout("%p page eof %llu\n", page,
                                     (snap_size == -1 ? i_size : snap_size));
                                done = 1;
                                unlock_page(page);
                                break;
                        }
                        if (PageWriteback(page)) {
                                dout("%p under writeback\n", page);
                                unlock_page(page);
                                break;
                        }

                        /* only if matching snap context */
                        pgsnapc = page_snap_context(page);
                        if (pgsnapc->seq > snapc->seq) {
                                dout("page snapc %p %lld > oldest %p %lld\n",
                                     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
                                unlock_page(page);
                                if (!locked_pages)
                                        continue; /* keep looking for snap */
                                break;
                        }

                        if (!clear_page_dirty_for_io(page)) {
                                dout("%p !clear_page_dirty_for_io\n", page);
                                unlock_page(page);
                                break;
                        }

                        /*
                         * We have something to write.  If this is
                         * the first locked page this time through,
                         * allocate an osd request and a page array
                         * that it will use.
                         */
                        if (locked_pages == 0) {
                                BUG_ON(pages);
                                /* prepare async write request */
                                offset = (u64)page_offset(page);
                                len = wsize;
                                req = ceph_osdc_new_request(&fsc->client->osdc,
                                                        &ci->i_layout, vino,
                                                        offset, &len, 0,
                                                        do_sync ? 2 : 1,
                                                        CEPH_OSD_OP_WRITE,
                                                        CEPH_OSD_FLAG_WRITE |
                                                        CEPH_OSD_FLAG_ONDISK,
                                                        snapc, truncate_seq,
                                                        truncate_size, true);
                                if (IS_ERR(req)) {
                                        rc = PTR_ERR(req);
                                        unlock_page(page);
                                        break;
                                }

                                if (do_sync)
                                        osd_req_op_init(req, 1,
                                                CEPH_OSD_OP_STARTSYNC, 0);

                                req->r_callback = writepages_finish;
                                req->r_inode = inode;

                                max_pages = calc_pages_for(0, (u64)len);
                                pages = kmalloc(max_pages * sizeof(*pages),
                                                GFP_NOFS);
                                if (!pages) {
                                        pool = fsc->wb_pagevec_pool;
                                        pages = mempool_alloc(pool, GFP_NOFS);
                                        BUG_ON(!pages);
                                }
                        }

                        /* note position of first page in pvec */
                        if (first < 0)
                                first = i;
                        dout("%p will write page %p idx %lu\n",
                             inode, page, page->index);

                        writeback_stat =
                                atomic_long_inc_return(&fsc->writeback_count);
                        if (writeback_stat > CONGESTION_ON_THRESH(
                                    fsc->mount_options->congestion_kb)) {
                                set_bdi_congested(&fsc->backing_dev_info,
                                                  BLK_RW_ASYNC);
                        }

                        set_page_writeback(page);
                        pages[locked_pages] = page;
                        locked_pages++;
                        next = page->index + 1;
                }

                /* did we get anything? */
                if (!locked_pages)
                        goto release_pvec_pages;
                if (i) {
                        int j;
                        BUG_ON(!locked_pages || first < 0);

                        if (pvec_pages && i == pvec_pages &&
                            locked_pages < max_pages) {
                                dout("reached end pvec, trying for more\n");
                                pagevec_reinit(&pvec);
                                goto get_more_pages;
                        }

                        /* shift unused pages over in the pvec...  we
                         * will need to release them below. */
                        for (j = i; j < pvec_pages; j++) {
                                dout(" pvec leftover page %p\n",
                                     pvec.pages[j]);
                                pvec.pages[j-i+first] = pvec.pages[j];
                        }
                        pvec.nr -= i-first;
                }

                /* Format the osd request message and submit the write */
                offset = page_offset(pages[0]);
                len = (u64)locked_pages << PAGE_CACHE_SHIFT;
                if (snap_size == -1) {
                        len = min(len, (u64)i_size_read(inode) - offset);
                        /* writepages_finish() clears writeback pages
                         * according to the data length, so make sure
                         * data length covers all locked pages */
                        len = max(len, 1 +
                                ((u64)(locked_pages - 1) << PAGE_CACHE_SHIFT));
                } else {
                        len = min(len, snap_size - offset);
                }
                dout("writepages got %d pages at %llu~%llu\n",
                     locked_pages, offset, len);

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                 !!pool, false);

                pages = NULL;   /* request message now owns the pages array */
                pool = NULL;

                /* Update the write op length in case we changed it */
                osd_req_op_extent_update(req, 0, len);

                vino = ceph_vino(inode);
                ceph_osdc_build_request(req, offset, snapc, vino.snap,
                                        &inode->i_mtime);

                rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
                BUG_ON(rc);
                req = NULL;

                /* continue? */
                index = next;
                wbc->nr_to_write -= locked_pages;
                if (wbc->nr_to_write <= 0)
                        done = 1;

release_pvec_pages:
                dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
                     pvec.nr ? pvec.pages[0] : NULL);
                pagevec_release(&pvec);

                if (locked_pages && !done)
                        goto retry;
        }

        if (should_loop && !done) {
                /* more to do; loop back to beginning of file */
                dout("writepages looping back to beginning of file\n");
                should_loop = 0;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;

out:
        if (req)
                ceph_osdc_put_request(req);
        ceph_put_snap_context(snapc);
        dout("writepages done, rc = %d\n", rc);
        return rc;
}
/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
                                           struct ceph_snap_context *snapc)
{
        struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
        int ret = !oldest || snapc->seq <= oldest->seq;

        ceph_put_snap_context(oldest);
        return ret;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
                                      loff_t pos, unsigned len,
                                      struct page *page)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        loff_t page_off = pos & PAGE_CACHE_MASK;
        int pos_in_page = pos & ~PAGE_CACHE_MASK;
        int end_in_page = pos_in_page + len;
        loff_t i_size;
        int r;
        struct ceph_snap_context *snapc, *oldest;

retry_locked:
        /* writepages currently holds the page lock, but even if that
         * changes later we would still need to wait for any writeback
         * in flight to finish before dirtying the page again */
        wait_on_page_writeback(page);

        snapc = page_snap_context(page);
        if (snapc && snapc != ci->i_head_snapc) {
                /*
                 * this page is already dirty in another (older) snap
                 * context!  is it writeable now?
                 */
                oldest = get_oldest_context(inode, NULL);

                if (snapc->seq > oldest->seq) {
                        ceph_put_snap_context(oldest);
                        dout(" page %p snapc %p not current or oldest\n",
                             page, snapc);
                        /*
                         * queue for writeback, and wait for snapc to
                         * be writeable or written
                         */
                        snapc = ceph_get_snap_context(snapc);
                        unlock_page(page);
                        ceph_queue_writeback(inode);
                        r = wait_event_interruptible(ci->i_cap_wq,
                               context_is_writeable_or_written(inode, snapc));
                        ceph_put_snap_context(snapc);
                        if (r == -ERESTARTSYS)
                                return r;
                        return -EAGAIN;
                }
                ceph_put_snap_context(oldest);

                /* yay, writeable, do it now (without dropping page lock) */
                dout(" page %p snapc %p not current, but oldest\n",
                     page, snapc);
                if (!clear_page_dirty_for_io(page))
                        goto retry_locked;
                r = writepage_nounlock(page, NULL);
                if (r < 0)
                        goto fail_unlock;
                goto retry_locked;
        }

        if (PageUptodate(page)) {
                dout(" page %p already uptodate\n", page);
                return 0;
        }

        /* full page? */
        if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
                return 0;

        /* past end of file? */
        i_size = inode->i_size; /* caller holds i_mutex */

        if (page_off >= i_size ||
            (pos_in_page == 0 && (pos+len) >= i_size &&
             end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
                dout(" zeroing %p 0 - %d and %d - %d\n",
                     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
                zero_user_segments(page,
                                   0, pos_in_page,
                                   end_in_page, PAGE_CACHE_SIZE);
                return 0;
        }

        /* we need to read it. */
        r = ceph_do_readpage(file, page);
        if (r < 0) {
                if (r == -EINPROGRESS)
                        return -EAGAIN;
                goto fail_unlock;
        }
        goto retry_locked;
fail_unlock:
        unlock_page(page);
        return r;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = file_inode(file);
        struct page *page;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        int r;

        do {
                /* get a page */
                page = grab_cache_page_write_begin(mapping, index, 0);
                if (!page)
                        return -ENOMEM;
                *pagep = page;

                dout("write_begin file %p inode %p page %p %d~%d\n", file,
                     inode, page, (int)pos, (int)len);

                r = ceph_update_writeable_page(file, pos, len, page);
                if (r < 0)
                        page_cache_release(page);
                else
                        *pagep = page;
        } while (r == -EAGAIN);

        return r;
}
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = file_inode(file);
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        int check_cap = 0;

        dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
             inode, page, (int)pos, (int)copied, (int)len);

        /* zero the stale part of the page if we did a short copy
         * (zero_user_segment takes absolute in-page offsets, so the
         * end of the stale range is from+len, not len) */
        if (copied < len)
                zero_user_segment(page, from+copied, from+len);

        /* did file size increase? */
        /* (no need for i_size_read(); the caller holds i_mutex) */
        if (pos+copied > inode->i_size)
                check_cap = ceph_inode_set_size(inode, pos+copied);

        if (!PageUptodate(page))
                SetPageUptodate(page);

        set_page_dirty(page);

        unlock_page(page);
        page_cache_release(page);

        if (check_cap)
                ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

        return copied;
}
/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t pos)
{
        WARN_ON(1);
        return -EINVAL;
}

const struct address_space_operations ceph_aops = {
        .readpage = ceph_readpage,
        .readpages = ceph_readpages,
        .writepage = ceph_writepage,
        .writepages = ceph_writepages_start,
        .write_begin = ceph_write_begin,
        .write_end = ceph_write_end,
        .set_page_dirty = ceph_set_page_dirty,
        .invalidatepage = ceph_invalidatepage,
        .releasepage = ceph_releasepage,
        .direct_IO = ceph_direct_io,
};
/*
 * vm ops
 */
static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        struct page *pinned_page = NULL;
        loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
        int want, got, ret;

        dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
             inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        while (1) {
                got = 0;
                ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want,
                                    -1, &got, &pinned_page);
                if (ret == 0)
                        break;
                if (ret != -ERESTARTSYS) {
                        WARN_ON(1);
                        return VM_FAULT_SIGBUS;
                }
        }
        dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
             inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
            ci->i_inline_version == CEPH_INLINE_NONE)
                ret = filemap_fault(vma, vmf);
        else
                ret = -EAGAIN;

        dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
             inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
        if (pinned_page)
                page_cache_release(pinned_page);
        ceph_put_cap_refs(ci, got);

        if (ret != -EAGAIN)
                return ret;

        /* read inline data */
        if (off >= PAGE_CACHE_SIZE) {
                /* does not support inline data > PAGE_SIZE */
                ret = VM_FAULT_SIGBUS;
        } else {
                int ret1;
                struct address_space *mapping = inode->i_mapping;
                struct page *page = find_or_create_page(mapping, 0,
                                        mapping_gfp_constraint(mapping,
                                        ~__GFP_FS));
                if (!page) {
                        ret = VM_FAULT_OOM;
                        goto out;
                }
                ret1 = __ceph_do_getattr(inode, page,
                                         CEPH_STAT_CAP_INLINE_DATA, true);
                if (ret1 < 0 || off >= i_size_read(inode)) {
                        unlock_page(page);
                        page_cache_release(page);
                        ret = VM_FAULT_SIGBUS;
                        goto out;
                }
                if (ret1 < PAGE_CACHE_SIZE)
                        zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
                else
                        flush_dcache_page(page);
                SetPageUptodate(page);
                vmf->page = page;
                ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
        }
out:
        dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
             inode, off, (size_t)PAGE_CACHE_SIZE, ret);
        return ret;
}
/*
 * Reuse write_begin here for simplicity.
 */
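/*
 * page_mkwrite: a shared mapping's page is about to become writable.
 * We take Fb (buffer) cap refs, move any inline data out of line
 * first, then dirty the page under the current snap context via
 * ceph_update_writeable_page(), returning with the page locked on
 * success (VM_FAULT_LOCKED).
 */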
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        struct ceph_cap_flush *prealloc_cf;
        struct page *page = vmf->page;
        loff_t off = page_offset(page);
        loff_t size = i_size_read(inode);
        size_t len;
        int want, got, ret;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return VM_FAULT_SIGBUS;

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                struct page *locked_page = NULL;
                if (off == 0) {
                        lock_page(page);
                        locked_page = page;
                }
                ret = ceph_uninline_data(vma->vm_file, locked_page);
                if (locked_page)
                        unlock_page(locked_page);
                if (ret < 0) {
                        ret = VM_FAULT_SIGBUS;
                        goto out_free;
                }
        }

        if (off + PAGE_CACHE_SIZE <= size)
                len = PAGE_CACHE_SIZE;
        else
                len = size & ~PAGE_CACHE_MASK;

        dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
             inode, ceph_vinop(inode), off, len, size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        while (1) {
                got = 0;
                ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
                                    &got, NULL);
                if (ret == 0)
                        break;
                if (ret != -ERESTARTSYS) {
                        WARN_ON(1);
                        ret = VM_FAULT_SIGBUS;
                        goto out_free;
                }
        }
        dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
             inode, off, len, ceph_cap_string(got));

        /* Update time before taking page lock */
        file_update_time(vma->vm_file);

        lock_page(page);

        ret = VM_FAULT_NOPAGE;
        if ((off > size) ||
            (page->mapping != inode->i_mapping))
                goto out;

        ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
        if (ret == 0) {
                /* success.  we'll keep the page locked. */
                set_page_dirty(page);
                ret = VM_FAULT_LOCKED;
        } else {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
                else
                        ret = VM_FAULT_SIGBUS;
        }
out:
        if (ret != VM_FAULT_LOCKED)
                unlock_page(page);
        if (ret == VM_FAULT_LOCKED ||
            ci->i_inline_version != CEPH_INLINE_NONE) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
             inode, off, len, ceph_cap_string(got), ret);
        ceph_put_cap_refs(ci, got);
out_free:
        ceph_free_cap_flush(prealloc_cf);

        return ret;
}
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                           char *data, size_t len)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (locked_page) {
                page = locked_page;
        } else {
                if (i_size_read(inode) == 0)
                        return;
                page = find_or_create_page(mapping, 0,
                                           mapping_gfp_constraint(mapping,
                                           ~__GFP_FS));
                if (!page)
                        return;
                if (PageUptodate(page)) {
                        unlock_page(page);
                        page_cache_release(page);
                        return;
                }
        }

        dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
             inode, ceph_vinop(inode), len, locked_page);

        if (len > 0) {
                void *kaddr = kmap_atomic(page);
                memcpy(kaddr, data, len);
                kunmap_atomic(kaddr);
        }

        if (page != locked_page) {
                if (len < PAGE_CACHE_SIZE)
                        zero_user_segment(page, len, PAGE_CACHE_SIZE);
                else
                        flush_dcache_page(page);

                SetPageUptodate(page);
                unlock_page(page);
                page_cache_release(page);
        }
}
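
/*
 * Move inline file data out to its own object.  This is done as two
 * OSD requests: a CREATE to instantiate the data object, then a WRITE
 * of the inline contents guarded by a CMPXATTR on "inline_version"
 * (u64 mode, GT op) plus a SETXATTR recording the version, so a racing
 * uninline that already wrote a newer version is not clobbered.  A
 * guard failure comes back as -ECANCELED and is treated as success.
 */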
int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        struct page *page = NULL;
        u64 len, inline_version;
        int err = 0;
        bool from_pagecache = false;

        spin_lock(&ci->i_ceph_lock);
        inline_version = ci->i_inline_version;
        spin_unlock(&ci->i_ceph_lock);

        dout("uninline_data %p %llx.%llx inline_version %llu\n",
             inode, ceph_vinop(inode), inline_version);

        if (inline_version == 1 || /* initial version, no data */
            inline_version == CEPH_INLINE_NONE)
                goto out;

        if (locked_page) {
                page = locked_page;
                WARN_ON(!PageUptodate(page));
        } else if (ceph_caps_issued(ci) &
                   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
                page = find_get_page(inode->i_mapping, 0);
                if (page) {
                        if (PageUptodate(page)) {
                                from_pagecache = true;
                                lock_page(page);
                        } else {
                                page_cache_release(page);
                                page = NULL;
                        }
                }
        }

        if (page) {
                len = i_size_read(inode);
                if (len > PAGE_CACHE_SIZE)
                        len = PAGE_CACHE_SIZE;
        } else {
                page = __page_cache_alloc(GFP_NOFS);
                if (!page) {
                        err = -ENOMEM;
                        goto out;
                }
                err = __ceph_do_getattr(inode, page,
                                        CEPH_STAT_CAP_INLINE_DATA, true);
                if (err < 0) {
                        /* no inline data */
                        if (err == -ENODATA)
                                err = 0;
                        goto out;
                }
                len = err;
        }

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), 0, &len, 0, 1,
                                    CEPH_OSD_OP_CREATE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
                                    ceph_empty_snapc, 0, 0, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
        err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!err)
                err = ceph_osdc_wait_request(&fsc->client->osdc, req);
        ceph_osdc_put_request(req);
        if (err < 0)
                goto out;

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), 0, &len, 1, 3,
                                    CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
                                    ceph_empty_snapc,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

        {
                __le64 xattr_buf = cpu_to_le64(inline_version);
                err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
                                            "inline_version", &xattr_buf,
                                            sizeof(xattr_buf),
                                            CEPH_OSD_CMPXATTR_OP_GT,
                                            CEPH_OSD_CMPXATTR_MODE_U64);
                if (err)
                        goto out_put;
        }

        {
                char xattr_buf[32];
                int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
                                         "%llu", inline_version);
                err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
                                            "inline_version",
                                            xattr_buf, xattr_len, 0, 0);
                if (err)
                        goto out_put;
        }

        ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
        err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!err)
                err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
        ceph_osdc_put_request(req);
        if (err == -ECANCELED)
                err = 0;
out:
        if (page && page != locked_page) {
                if (from_pagecache) {
                        unlock_page(page);
                        page_cache_release(page);
                } else
                        __free_pages(page, 0);
        }

        dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
             inode, ceph_vinop(inode), inline_version, err);
        return err;
}
static const struct vm_operations_struct ceph_vmops = {
        .fault          = ceph_filemap_fault,
        .page_mkwrite   = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

        if (!mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ceph_vmops;
        return 0;
}
enum {
        POOL_READ       = 1,
        POOL_WRITE      = 2,
};
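
/*
 * Determine whether we may read and/or write the data pool by probing
 * it directly: issue a dummy STAT read and an exclusive CREATE write
 * against a per-inode probe object.  -EPERM from either probe means
 * the corresponding OSD capability is missing (-ENOENT and -EEXIST are
 * fine, since the probe object need not exist).  Results are cached
 * per pool in mdsc->pool_perm_tree under pool_perm_rwsem.
 */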
static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
        struct rb_node **p, *parent;
        struct ceph_pool_perm *perm;
        struct page **pages;
        int err = 0, err2 = 0, have = 0;

        down_read(&mdsc->pool_perm_rwsem);
        p = &mdsc->pool_perm_tree.rb_node;
        while (*p) {
                perm = rb_entry(*p, struct ceph_pool_perm, node);
                if (pool < perm->pool)
                        p = &(*p)->rb_left;
                else if (pool > perm->pool)
                        p = &(*p)->rb_right;
                else {
                        have = perm->perm;
                        break;
                }
        }
        up_read(&mdsc->pool_perm_rwsem);
        if (*p)
                goto out;

        dout("__ceph_pool_perm_get pool %u no perm cached\n", pool);

        down_write(&mdsc->pool_perm_rwsem);
        parent = NULL;
        while (*p) {
                parent = *p;
                perm = rb_entry(parent, struct ceph_pool_perm, node);
                if (pool < perm->pool)
                        p = &(*p)->rb_left;
                else if (pool > perm->pool)
                        p = &(*p)->rb_right;
                else {
                        have = perm->perm;
                        break;
                }
        }
        if (*p) {
                up_write(&mdsc->pool_perm_rwsem);
                goto out;
        }

        rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
                                         ceph_empty_snapc,
                                         1, false, GFP_NOFS);
        if (!rd_req) {
                err = -ENOMEM;
                goto out_unlock;
        }

        rd_req->r_flags = CEPH_OSD_FLAG_READ;
        osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
        rd_req->r_base_oloc.pool = pool;
        snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name),
                 "%llx.00000000", ci->i_vino.ino);
        rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);

        wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
                                         ceph_empty_snapc,
                                         1, false, GFP_NOFS);
        if (!wr_req) {
                err = -ENOMEM;
                goto out_unlock;
        }

        wr_req->r_flags = CEPH_OSD_FLAG_WRITE |
                          CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
        osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
        wr_req->r_base_oloc.pool = pool;
        wr_req->r_base_oid = rd_req->r_base_oid;

        /* one page should be large enough for STAT data */
        pages = ceph_alloc_page_vector(1, GFP_KERNEL);
        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                goto out_unlock;
        }

        osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
                                     0, false, true);
        ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
                                &ci->vfs_inode.i_mtime);
        err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

        ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
                                &ci->vfs_inode.i_mtime);
        err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

        if (!err)
                err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
        if (!err2)
                err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

        if (err >= 0 || err == -ENOENT)
                have |= POOL_READ;
        else if (err != -EPERM)
                goto out_unlock;

        if (err2 == 0 || err2 == -EEXIST)
                have |= POOL_WRITE;
        else if (err2 != -EPERM) {
                err = err2;
                goto out_unlock;
        }

        perm = kmalloc(sizeof(*perm), GFP_NOFS);
        if (!perm) {
                err = -ENOMEM;
                goto out_unlock;
        }

        perm->pool = pool;
        perm->perm = have;
        rb_link_node(&perm->node, parent, p);
        rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
        err = 0;
out_unlock:
        up_write(&mdsc->pool_perm_rwsem);

        if (rd_req)
                ceph_osdc_put_request(rd_req);
        if (wr_req)
                ceph_osdc_put_request(wr_req);
out:
        if (!err)
                err = have;
        dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err);
        return err;
}
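
/*
 * Check (and, on first use, probe and cache) pool permissions against
 * the caps we need.  Note the "goto check" loop: after computing perms
 * with i_ceph_lock dropped, we re-read the flags and re-validate in
 * case the file layout switched to a different pool in the meantime.
 */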
int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
        u32 pool;
        int ret, flags;

        if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
                                NOPOOLPERM))
                return 0;

        spin_lock(&ci->i_ceph_lock);
        flags = ci->i_ceph_flags;
        pool = ceph_file_layout_pg_pool(ci->i_layout);
        spin_unlock(&ci->i_ceph_lock);
check:
        if (flags & CEPH_I_POOL_PERM) {
                if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
                        dout("ceph_pool_perm_check pool %u no read perm\n",
                             pool);
                        return -EPERM;
                }
                if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
                        dout("ceph_pool_perm_check pool %u no write perm\n",
                             pool);
                        return -EPERM;
                }
                return 0;
        }

        ret = __ceph_pool_perm_get(ci, pool);
        if (ret < 0)
                return ret;

        flags = CEPH_I_POOL_PERM;
        if (ret & POOL_READ)
                flags |= CEPH_I_POOL_RD;
        if (ret & POOL_WRITE)
                flags |= CEPH_I_POOL_WR;

        spin_lock(&ci->i_ceph_lock);
        if (pool == ceph_file_layout_pg_pool(ci->i_layout)) {
                ci->i_ceph_flags = flags;
        } else {
                pool = ceph_file_layout_pg_pool(ci->i_layout);
                flags = ci->i_ceph_flags;
        }
        spin_unlock(&ci->i_ceph_lock);
        goto check;
}
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
        struct ceph_pool_perm *perm;
        struct rb_node *n;

        while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
                n = rb_first(&mdsc->pool_perm_tree);
                perm = rb_entry(n, struct ceph_pool_perm, node);
                rb_erase(n, &mdsc->pool_perm_tree);
                kfree(perm);
        }
}