dir.c

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
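
/*
 * For illustration only: a getattr issued for fstat(2) on a known file
 * can be expressed as a pure inode op (base ino = that file's ino, no
 * path), while looking up "foo" inside a cached directory is a dentry
 * op (base ino = that directory's ino, path = "foo").
 */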
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = d_inode(dentry->d_parent);
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}

static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
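
/*
 * Concretely, f_pos carries the 32-bit frag in its high word and the
 * 32-bit offset within that frag in its low word; ceph_make_fpos(),
 * used further down in this file, does the packing that fpos_frag()
 * and fpos_off() above undo.  For example, offset 5 within frag
 * 0x01000000 is encoded as the f_pos value 0x0100000000000005.
 */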
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(fi->last_name);
	fi->last_name = buf;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	fi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
	int err = 0;
	loff_t ptr_pos = 0;
	struct ceph_readdir_cache_control cache_ctl = {};

	dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);

	/* we can calculate cache index for the first dirfrag */
	if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
		cache_ctl.index = fpos_off(ctx->pos) - 2;
		BUG_ON(cache_ctl.index < 0);
		ptr_pos = cache_ctl.index * sizeof(struct dentry *);
	}

	while (true) {
		pgoff_t pgoff;
		bool emit_dentry;

		if (ptr_pos >= i_size_read(dir)) {
			fi->flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}

		err = -EAGAIN;
		pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
			ceph_readdir_cache_release(&cache_ctl);
			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
			if (!cache_ctl.page) {
				dout(" page %lu not found\n", pgoff);
				break;
			}
			/* reading/filling the cache are serialized by
			 * i_mutex, no need to use page lock */
			unlock_page(cache_ctl.page);
			cache_ctl.dentries = kmap(cache_ctl.page);
		}

		rcu_read_lock();
		spin_lock(&parent->d_lock);
		/* check i_size again here, because empty directory can be
		 * marked as complete while not holding the i_mutex. */
		if (ceph_dir_is_complete_ordered(dir) &&
		    ptr_pos < i_size_read(dir))
			dentry = cache_ctl.dentries[cache_ctl.index % nsize];
		else
			dentry = NULL;
		spin_unlock(&parent->d_lock);
		if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
			dentry = NULL;
		rcu_read_unlock();
		if (!dentry)
			break;

		emit_dentry = false;
		di = ceph_dentry(dentry);
		spin_lock(&dentry->d_lock);
		if (di->lease_shared_gen == shared_gen &&
		    d_really_is_positive(dentry) &&
		    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
		    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0) {
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
			     dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len,
				      ceph_translate_ino(dentry->d_sb,
							 d_inode(dentry)->i_ino),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}

		cache_ctl.index++;
		ptr_pos += sizeof(struct dentry *);
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
		/* last_name no longer matches cache index */
		if (fi->readdir_cache_idx >= 0) {
			fi->readdir_cache_idx = -1;
			fi->dir_release_count = 0;
		}
	}
	return err;
}
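
/*
 * A note on the index math above: readdir offsets 0 and 1 are reserved
 * for "." and "..", so the first real entry of the leftmost frag sits
 * at offset 2 and maps to readdir-cache slot 0 -- hence the
 * "fpos_off(ctx->pos) - 2" when computing cache_ctl.index.  The dir
 * inode's i_size is (ab)used as "number of cached dentry pointers *
 * sizeof(struct dentry *)", which is what the ptr_pos bound checks.
 */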
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		}
		req->r_dir_release_cnt = fi->dir_release_count;
		req->r_dir_ordered_cnt = fi->dir_ordered_count;
		req->r_readdir_cache_idx = fi->readdir_cache_idx;
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			off = req->r_readdir_offset;
			fi->next_offset = off;
		}

		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_did_prepopulate) {
			fi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (fi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				fi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) && off == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				fi->dir_release_count = req->r_dir_release_cnt;
				fi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate");
			/* disable readdir cache */
			fi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			fi->dir_release_count = 0;
		}

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1],
				       fi->next_offset + rinfo->dir_nr);
			if (err)
				return err;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(fi->readdir_cache_idx < 0);
			i_size_write(inode, fi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->dir_release_count = 0;
	fi->readdir_cache_idx = -1;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			/* discard buffered readdir content on seekdir(0), or
			 * seek to new frag, or seek prior to current chunk */
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		} else if (fpos_cmp(offset, old_offset) > 0) {
			/* reset dir_release_count if we did a forward seek */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
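
/*
 * Note that the offset seen here is the same frag/offset cookie
 * described above (typically whatever an earlier telldir(3) returned),
 * not a byte count, which is presumably why SEEK_END is rejected with
 * -EOPNOTSUPP: there is no meaningful "end" position to compute for a
 * fragmented directory.
 */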
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced). To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to old MDS. Recent MDS does not send traceless
		 * reply for request that creates new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
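
/*
 * The returned value is a cap bitmask handed to the MDS request as
 * r_inode_drop (see the callers below).  For instance, if the inode is
 * about to lose its last link and no caps are wanted locally, the mask
 * works out to roughly ~CEPH_CAP_PIN, i.e. "drop everything except
 * PIN"; otherwise only the LINK caps are dropped.
 */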
/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = d_inode(dentry->d_parent);
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
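
/*
 * Renewal is driven by di->lease_renew_after, which is presumably set
 * elsewhere in the client to roughly the midpoint of the lease term
 * when the lease is installed.  Once that point has passed, the lease
 * is still honored here, but a CEPH_MDS_LEASE_RENEW message is sent
 * (outside d_lock) to extend it before it actually expires.
 */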
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (d_really_is_positive(dentry))
			valid = ceph_is_any_caps(d_inode(dentry));
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}
/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
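
/*
 * In other words, unless the directory's layout selects a specific
 * hash algorithm, the name hash the VFS already computed is reused; a
 * directory whose layout asks for a different CEPH_STR_HASH_* scheme
 * gets its names rehashed here via ceph_str_hash(), presumably so the
 * value matches the fragment hashing used for that directory on the
 * MDS side (see its use as r_direct_hash in ceph_readdir() above).
 */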
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};