glops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        fs_err(gl->gl_name.ln_sbd,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to add to this transaction
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
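        /* A buffer still dirty, pinned, or locked here is an error unless we
           were called from fsync, where not all buffers will be clean yet. */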
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr;

        memset(&tr, 0, sizeof(tr));
        INIT_LIST_HEAD(&tr.tr_buf);
        INIT_LIST_HEAD(&tr.tr_databuf);
        tr.tr_revokes = atomic_read(&gl->gl_ail_count);

        if (!tr.tr_revokes)
                return;

        /* A shortened, inline version of gfs2_trans_begin().
         * tr->alloced is not set since the transaction structure is
         * on the stack */
        tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
        tr.tr_ip = _RET_IP_;
        if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
                return;
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;

        __gfs2_ail_flush(gl, 0, tr.tr_revokes);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;

        if (!revokes)
                return;

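        /*
         * The first revoke descriptor block holds
         * (bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) entries;
         * each continuation block holds
         * (bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more.
         * Grow max_revokes in whole-block steps until it covers every
         * revoke outstanding on this glock.
         */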
        while (revokes > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

        ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */
static void rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd;
        int error;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_rgrp_brelse(rgd);
        spin_unlock(&gl->gl_lockref.lock);

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

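        /*
         * Flush the journal first, then write back and wait on this
         * rgrp's range of the metadata address space, and only then
         * revoke the now-written buffers from the AIL.
         */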
        gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
        filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        mapping_set_error(mapping, error);
        gfs2_ail_empty_gl(gl);

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_free_clones(rgd);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gl->gl_object;

        if (rgd)
                gfs2_rgrp_brelse(rgd);

        WARN_ON_ONCE(!(flags & DIO_METADATA));
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

        if (rgd)
                rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */
static void inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        if (ip && !S_ISREG(ip->i_inode.i_mode))
                ip = NULL;
        if (ip) {
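                /*
                 * GIF_SW_PAGED means the inode has writable mmap()
                 * mappings; zap them so further writes fault in again
                 * and redirty pages under the new lock state.
                 */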
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
        filemap_fdatawrite(metamapping);
        if (ip) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        error = filemap_fdatawait(metamapping);
        mapping_set_error(metamapping, error);
        gfs2_ail_empty_gl(gl);
        /*
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gl->gl_object;

        gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                if (ip) {
                        set_bit(GIF_INVALID, &ip->i_flags);
                        forget_all_cached_acls(&ip->i_inode);
                        gfs2_dir_hash_inval(ip);
                }
        }
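
        /*
         * Invalidating the rindex means the resource group index must
         * be re-read, so flush the log and mark the in-core copy stale.
         */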
        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_holder *gh;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

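        /* Refuse to demote while more than one holder is queued. */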
        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (gh->gh_list.next != &gl->gl_holders)
                        return 0;
        }

        return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */
static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
        /*
         * We will need to review setting the nlink count here in the
         * light of the forthcoming ro bind mount work. This is a reminder
         * to do that.
         */
        if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
                if (nlink == 0)
                        clear_nlink(inode);
                else
                        set_nlink(inode, nlink);
        }
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        const struct gfs2_dinode *str = buf;
        struct timespec atime;
        u16 height, depth;

        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        }

        i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
        i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
        gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
        i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));

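        /* Take the on-disk atime only if it is newer than the in-core one. */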
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
                ip->i_inode.i_atime = atime;
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        ip->i_goal = be64_to_cpu(str->di_goal_meta);
        ip->i_generation = be64_to_cpu(str->di_generation);

        ip->i_diskflags = be32_to_cpu(str->di_flags);
        ip->i_eattr = be64_to_cpu(str->di_eattr);
        /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
        gfs2_set_inode_flags(&ip->i_inode);
        height = be16_to_cpu(str->di_height);
        if (unlikely(height > GFS2_MAX_META_HEIGHT))
                goto corrupt;
        ip->i_height = (u8)height;

        depth = be16_to_cpu(str->di_depth);
        if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
                goto corrupt;
        ip->i_depth = (u8)depth;
        ip->i_entries = be32_to_cpu(str->di_entries);

        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
corrupt:
        gfs2_consist_inode(ip);
        return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */
static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip || (gh->gh_flags & GL_SKIP))
                return 0;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
        }

        if (gh->gh_state != LM_ST_DEFERRED)
                inode_dio_wait(&ip->i_inode);

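        /*
         * An interrupted truncate leaves GFS2_DIF_TRUNC_IN_PROG set;
         * queue the inode so the quota daemon can finish the truncate.
         */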
        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                return 1;
        }

        return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock whose inode is being dumped
 *
 */
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
        const struct gfs2_inode *ip = gl->gl_object;

        if (ip == NULL)
                return;
        gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
                       (unsigned long long)ip->i_no_formal_ino,
                       (unsigned long long)ip->i_no_addr,
                       IF2DT(ip->i_inode.i_mode), ip->i_flags,
                       (unsigned int)ip->i_diskflags,
                       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */
static void freeze_go_sync(struct gfs2_glock *gl)
{
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

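        /*
         * Being demoted to the shared state while our journal is live
         * means another node is freezing the filesystem; freeze the
         * local super block to match and queue the work to thaw later.
         */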
        if (gl->gl_state == LM_ST_SHARED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
        }
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

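                /*
                 * The freeze should have flushed the journal and left
                 * an unmount header; anything else is inconsistent.
                 */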
                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Resume logging from where the journal head left off */
                if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
        return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */
static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
        return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
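                /*
                 * Take a reference for the delete work; drop it again
                 * if the work was already queued.
                 */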
                gl->gl_lockref.count++;
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                        gl->gl_lockref.count--;
        }
}

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_dump = inode_go_dump,
        .go_type = LM_TYPE_INODE,
        .go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_sync = rgrp_go_sync,
        .go_inval = rgrp_go_inval,
        .go_lock = gfs2_rgrp_go_lock,
        .go_unlock = gfs2_rgrp_go_unlock,
        .go_dump = gfs2_rgrp_dump,
        .go_type = LM_TYPE_RGRP,
        .go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_sync = freeze_go_sync,
        .go_xmote_bh = freeze_go_xmote_bh,
        .go_demote_ok = freeze_go_demote_ok,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_type = LM_TYPE_FLOCK,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_type = LM_TYPE_QUOTA,
        .go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_type = LM_TYPE_JOURNAL,
};

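/* Lookup table of glock operations, indexed by glock type (ln_type). */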
const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
        [LM_TYPE_QUOTA] = &gfs2_quota_glops,
        [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};