/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
        struct nilfs_mdt_info mi;
        unsigned long ncleansegs;  /* number of clean segments */
        __u64 allocmin;            /* lower limit of allocatable segment range */
        __u64 allocmax;            /* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
        return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
                                                   __u64 segnum)
{
        return nilfs_mdt_delete_block(sufile,
                                      nilfs_sufile_get_blkoff(sufile, segnum));
}

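/*
 * Worked example (illustrative, assuming 4 KiB blocks and the minimal
 * 16-byte segment usage entry): 256 entries fit in a block, and the
 * 24-byte sufile header occupies the first two entry slots, so
 * mi_first_entry_offset is 2.  Segment 300 then maps to block offset
 * (300 + 2) / 256 = 1 and in-block entry offset (300 + 2) % 256 = 46.
 */
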
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
        return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
                         int create, size_t *ndone,
                         void (*dofunc)(struct inode *, __u64,
                                        struct buffer_head *,
                                        struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        unsigned long blkoff, prev_blkoff;
        __u64 *seg;
        size_t nerr = 0, n = 0;
        int ret = 0;

        if (unlikely(nsegs == 0))
                goto out;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
                        printk(KERN_WARNING
                               "%s: invalid segment number: %llu\n", __func__,
                               (unsigned long long)*seg);
                        nerr++;
                }
        }
        if (nerr > 0) {
                ret = -EINVAL;
                goto out_sem;
        }

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        seg = segnumv;
        blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
        ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                dofunc(sufile, *seg, header_bh, bh);

                if (++seg >= segnumv + nsegs)
                        break;
                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                brelse(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_header;
        }
        brelse(bh);

 out_header:
        n = seg - segnumv;
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
        if (ndone)
                *ndone = n;
        return ret;
}

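/*
 * Example (an illustrative sketch, not part of the original file): a
 * minimal @dofunc primitive in the style of nilfs_sufile_do_cancel_free()
 * below.  The name example_do_mark_dirty is hypothetical, and the sketch
 * assumes the caller knows the segment is currently clean:
 *
 *	static void example_do_mark_dirty(struct inode *sufile, __u64 segnum,
 *					  struct buffer_head *header_bh,
 *					  struct buffer_head *su_bh)
 *	{
 *		struct nilfs_segment_usage *su;
 *		void *kaddr;
 *
 *		kaddr = kmap_atomic(su_bh->b_page);
 *		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
 *							  su_bh, kaddr);
 *		nilfs_segment_usage_set_dirty(su);
 *		kunmap_atomic(kaddr);
 *
 *		nilfs_sufile_mod_counter(header_bh, -1, 1);
 *		NILFS_SUI(sufile)->ncleansegs--;
 *		mark_buffer_dirty(su_bh);
 *		nilfs_mdt_mark_dirty(sufile);
 *	}
 *
 * It would be wired up as, e.g.:
 *
 *	ret = nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
 *				   example_do_mark_dirty);
 */
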
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
                       __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        __u64 nsegs;
        int ret = -ERANGE;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        nsegs = nilfs_sufile_get_nsegments(sufile);

        if (start <= end && end < nsegs) {
                sui->allocmin = start;
                sui->allocmax = end;
                ret = 0;
        }
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

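/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * restrict allocation to the lower half of the segment array before
 * allocating, e.g. in preparation for shrinking the filesystem:
 *
 *	__u64 nsegs = nilfs_sufile_get_nsegments(sufile);
 *	__u64 segnum;
 *	int err;
 *
 *	err = nilfs_sufile_set_alloc_range(sufile, 0, nsegs / 2 - 1);
 *	if (!err)
 *		err = nilfs_sufile_alloc(sufile, &segnum);
 */
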
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump.  On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, nsus, cnt;
        int ret, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        maxsegnum = sui->allocmax;
        segnum = last_alloc + 1;
        if (segnum < sui->allocmin || segnum > sui->allocmax)
                segnum = sui->allocmin;

        for (cnt = 0; cnt < nsegments; cnt += nsus) {
                if (segnum > maxsegnum) {
                        if (cnt < sui->allocmax - sui->allocmin + 1) {
                                /*
                                 * wrap around in the limited region.
                                 * if allocation started from
                                 * sui->allocmin, this never happens.
                                 */
                                segnum = sui->allocmin;
                                maxsegnum = last_alloc;
                        } else if (segnum > sui->allocmin &&
                                   sui->allocmax + 1 < nsegments) {
                                segnum = sui->allocmax + 1;
                                maxsegnum = nsegments - 1;
                        } else if (sui->allocmin > 0) {
                                segnum = 0;
                                maxsegnum = sui->allocmin - 1;
                        } else {
                                break; /* never happens */
                        }
                }
                trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr);

                        kaddr = kmap_atomic(header_bh->b_page);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr);

                        sui->ncleansegs--;
                        mark_buffer_dirty(header_bh);
                        mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;

                        trace_nilfs2_segment_usage_allocated(sufile, segnum);

                        goto out_header;
                }

                kunmap_atomic(kaddr);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

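/*
 * Note on the search order above: the scan starts at sh_last_alloc + 1
 * (clamped into [allocmin, allocmax]), runs up to allocmax, wraps back
 * to allocmin, then falls back to [allocmax + 1, nsegments - 1] and
 * finally [0, allocmin - 1], so each segment usage block is visited at
 * most once per call.
 */
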
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;

        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
        kunmap_atomic(kaddr);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;

        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr);
        mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        NILFS_SUI(sufile)->ncleansegs++;

        nilfs_mdt_mark_dirty(sufile);

        trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
        struct buffer_head *bh;
        int ret;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (!ret) {
                mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
        return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
                                   unsigned long nblocks, time_t modtime)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        WARN_ON(nilfs_segment_usage_error(su));
        if (modtime)
                su->su_lastmod = cpu_to_le64(modtime);
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
        brelse(bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

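/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * report the clean segment count:
 *
 *	struct nilfs_sustat sustat;
 *
 *	if (!nilfs_sufile_get_stat(sufile, &sustat))
 *		pr_info("NILFS: %llu of %llu segments are clean\n",
 *			(unsigned long long)sustat.ss_ncleansegs,
 *			(unsigned long long)sustat.ss_nsegs);
 */
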
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                               struct buffer_head *header_bh,
                               struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int suclean;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap_atomic(kaddr);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
        kunmap_atomic(kaddr);

        if (suclean) {
                nilfs_sufile_mod_counter(header_bh, -1, 0);
                NILFS_SUI(sufile)->ncleansegs--;
        }
        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
                                       __u64 start, __u64 end)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh;
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su, *su2;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        unsigned long segusages_per_block;
        unsigned long nsegs, ncleaned;
        __u64 segnum;
        void *kaddr;
        ssize_t n, nc;
        int ret;
        int j;

        nsegs = nilfs_sufile_get_nsegments(sufile);

        ret = -EINVAL;
        if (start > end || start >= nsegs)
                goto out;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out;

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        ncleaned = 0;

        for (segnum = start; segnum <= end; segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          end - segnum + 1);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out_header;
                        /* hole */
                        continue;
                }
                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                su2 = su;
                for (j = 0; j < n; j++, su = (void *)su + susz) {
                        if ((le32_to_cpu(su->su_flags) &
                             ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
                            nilfs_segment_is_active(nilfs, segnum + j)) {
                                ret = -EBUSY;
                                kunmap_atomic(kaddr);
                                brelse(su_bh);
                                goto out_header;
                        }
                }
                nc = 0;
                for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
                        if (nilfs_segment_usage_error(su)) {
                                nilfs_segment_usage_set_clean(su);
                                nc++;
                        }
                }
                kunmap_atomic(kaddr);
                if (nc > 0) {
                        mark_buffer_dirty(su_bh);
                        ncleaned += nc;
                }
                brelse(su_bh);

                if (n == segusages_per_block) {
                        /* make hole */
                        nilfs_sufile_delete_segment_usage_block(sufile, segnum);
                }
        }
        ret = 0;

 out_header:
        if (ncleaned > 0) {
                NILFS_SUI(sufile)->ncleansegs += ncleaned;
                nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
                nilfs_mdt_mark_dirty(sufile);
        }
        brelse(header_bh);
 out:
        return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        void *kaddr;
        unsigned long nsegs, nrsvsegs;
        int ret = 0;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        nsegs = nilfs_sufile_get_nsegments(sufile);
        if (nsegs == newnsegs)
                goto out;

        ret = -ENOSPC;
        nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
        if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
                goto out;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out;

        if (newnsegs > nsegs) {
                sui->ncleansegs += newnsegs - nsegs;
        } else /* newnsegs < nsegs */ {
                ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
                if (ret < 0)
                        goto out_header;

                sui->ncleansegs -= nsegs - newnsegs;
        }

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(sufile);
        nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
        brelse(header_bh);
 out:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() fetches usage information for up
 * to @nsi segments starting at @segnum and stores it in @buf.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                unsigned sisz, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(si, 0, sisz * n);
                        si = (void *)si + sisz * n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
                     j++, su = (void *)su + susz, si = (void *)si + sisz) {
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

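/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * walk the entries filled in by nilfs_sufile_get_suinfo().  The cursor
 * advances by the caller's @sisz rather than sizeof(struct nilfs_suinfo)
 * to match the layout the buffer was allocated with:
 *
 *	struct nilfs_suinfo *si = buf;
 *	unsigned long nclean = 0;
 *	ssize_t i, n;
 *
 *	n = nilfs_sufile_get_suinfo(sufile, segnum, buf, sisz, nsi);
 *	for (i = 0; i < n; i++, si = (void *)si + sisz)
 *		if (nilfs_suinfo_clean(si))
 *			nclean++;
 */
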
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
                                unsigned supsz, size_t nsup)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh, *bh;
        struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
        struct nilfs_segment_usage *su;
        void *kaddr;
        unsigned long blkoff, prev_blkoff;
        int cleansi, cleansu, dirtysi, dirtysu;
        long ncleaned = 0, ndirtied = 0;
        int ret = 0;

        if (unlikely(nsup == 0))
                return ret;

        for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
                if (sup->sup_segnum >= nilfs->ns_nsegments
                        || (sup->sup_flags &
                                (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
                        || (nilfs_suinfo_update_nblocks(sup) &&
                                sup->sup_sui.sui_nblocks >
                                nilfs->ns_blocks_per_segment))
                        return -EINVAL;
        }

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        sup = buf;
        blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
        ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                kaddr = kmap_atomic(bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, sup->sup_segnum, bh, kaddr);

                if (nilfs_suinfo_update_lastmod(sup))
                        su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

                if (nilfs_suinfo_update_nblocks(sup))
                        su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

                if (nilfs_suinfo_update_flags(sup)) {
                        /*
                         * Active flag is a virtual flag projected by running
                         * nilfs kernel code - drop it not to write it to
                         * disk.
                         */
                        sup->sup_sui.sui_flags &=
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

                        cleansi = nilfs_suinfo_clean(&sup->sup_sui);
                        cleansu = nilfs_segment_usage_clean(su);
                        dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
                        dirtysu = nilfs_segment_usage_dirty(su);

                        if (cleansi && !cleansu)
                                ++ncleaned;
                        else if (!cleansi && cleansu)
                                --ncleaned;

                        if (dirtysi && !dirtysu)
                                ++ndirtied;
                        else if (!dirtysi && dirtysu)
                                --ndirtied;

                        su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
                }

                kunmap_atomic(kaddr);

                sup = (void *)sup + supsz;
                if (sup >= supend)
                        break;

                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                mark_buffer_dirty(bh);
                put_bh(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_mark;
        }
        mark_buffer_dirty(bh);
        put_bh(bh);

 out_mark:
        if (ncleaned || ndirtied) {
                nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
                                         (u64)ndirtied);
                NILFS_SUI(sufile)->ncleansegs += ncleaned;
        }
        nilfs_mdt_mark_dirty(sufile);
 out_header:
        put_bh(header_bh);
 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

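/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * update the live block count of a single segment through
 * nilfs_sufile_set_suinfo().  This assumes the NILFS_SUINFO_UPDATE_NBLOCKS
 * field flag from the nilfs2 userspace API header:
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = segnum,
 *		.sup_flags = 1UL << NILFS_SUINFO_UPDATE_NBLOCKS,
 *	};
 *
 *	sup.sup_sui.sui_nblocks = nblocks;
 *	ret = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */
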
/**
 * nilfs_sufile_trim_fs() - trim ioctl handler
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs() goes through all segments containing
 * bytes from start to start + len.  start is rounded up to the next block
 * boundary and start + len is rounded down.  For each clean segment,
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error code
 * is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
        sector_t seg_start, seg_end, start_block, end_block;
        sector_t start = 0, nblocks = 0;
        u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
        int ret = 0;
        unsigned int sects_per_block;

        sects_per_block = (1 << nilfs->ns_blocksize_bits) /
                          bdev_logical_block_size(nilfs->ns_bdev);
        len = range->len >> nilfs->ns_blocksize_bits;
        minlen = range->minlen >> nilfs->ns_blocksize_bits;
        max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

        if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
                return -EINVAL;

        start_block = (range->start + nilfs->ns_blocksize - 1) >>
                      nilfs->ns_blocksize_bits;

        /*
         * range->len can be very large (actually, it is set to
         * ULLONG_MAX by default) - truncate upper end of the range
         * carefully so as not to overflow.
         */
        if (max_blocks - start_block < len)
                end_block = max_blocks - 1;
        else
                end_block = start_block + len - 1;

        segnum = nilfs_get_segnum_of_block(nilfs, start_block);
        segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

        down_read(&NILFS_MDT(sufile)->mi_sem);

        while (segnum <= segnum_end) {
                n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
                                                         segnum_end);

                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out_sem;
                        /* hole */
                        segnum += n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
                                                          su_bh, kaddr);
                for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;

                        nilfs_get_segment_range(nilfs, segnum, &seg_start,
                                                &seg_end);

                        if (!nblocks) {
                                /* start new extent */
                                start = seg_start;
                                nblocks = seg_end - seg_start + 1;
                                continue;
                        }

                        if (start + nblocks == seg_start) {
                                /* add to previous extent */
                                nblocks += seg_end - seg_start + 1;
                                continue;
                        }

                        /* discard previous extent */
                        if (start < start_block) {
                                nblocks -= start_block - start;
                                start = start_block;
                        }

                        if (nblocks >= minlen) {
                                kunmap_atomic(kaddr);

                                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                start * sects_per_block,
                                                nblocks * sects_per_block,
                                                GFP_NOFS, 0);
                                if (ret < 0) {
                                        put_bh(su_bh);
                                        goto out_sem;
                                }

                                ndiscarded += nblocks;
                                kaddr = kmap_atomic(su_bh->b_page);
                                su = nilfs_sufile_block_get_segment_usage(
                                        sufile, segnum, su_bh, kaddr);
                        }

                        /* start new extent */
                        start = seg_start;
                        nblocks = seg_end - seg_start + 1;
                }
                kunmap_atomic(kaddr);
                put_bh(su_bh);
        }

        if (nblocks) {
                /* discard last extent */
                if (start < start_block) {
                        nblocks -= start_block - start;
                        start = start_block;
                }
                if (start + nblocks > end_block + 1)
                        nblocks = end_block - start + 1;

                if (nblocks >= minlen) {
                        ret = blkdev_issue_discard(nilfs->ns_bdev,
                                        start * sects_per_block,
                                        nblocks * sects_per_block,
                                        GFP_NOFS, 0);
                        if (!ret)
                                ndiscarded += nblocks;
                }
        }

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);

        range->len = ndiscarded << nilfs->ns_blocksize_bits;
        return ret;
}

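/*
 * Usage sketch (hypothetical caller, mirroring what an FITRIM ioctl
 * handler would pass in): trim the whole device with no minimum extent
 * length.  On return, range.len is overwritten with the number of bytes
 * actually discarded:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *
 *	ret = nilfs_sufile_trim_fs(sufile, &range);
 */
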
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
                      struct nilfs_inode *raw_inode, struct inode **inodep)
{
        struct inode *sufile;
        struct nilfs_sufile_info *sui;
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        void *kaddr;
        int err;

        if (susize > sb->s_blocksize) {
                printk(KERN_ERR
                       "NILFS: too large segment usage size: %zu bytes.\n",
                       susize);
                return -EINVAL;
        } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
                printk(KERN_ERR
                       "NILFS: too small segment usage size: %zu bytes.\n",
                       susize);
                return -EINVAL;
        }

        sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
        if (unlikely(!sufile))
                return -ENOMEM;
        if (!(sufile->i_state & I_NEW))
                goto out;

        err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
        if (err)
                goto failed;

        nilfs_mdt_set_entry_size(sufile, susize,
                                 sizeof(struct nilfs_sufile_header));

        err = nilfs_read_inode_common(sufile, raw_inode);
        if (err)
                goto failed;

        err = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (err)
                goto failed;

        sui = NILFS_SUI(sufile);
        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        kunmap_atomic(kaddr);
        brelse(header_bh);

        sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
        sui->allocmin = 0;

        unlock_new_inode(sufile);
 out:
        *inodep = sufile;
        return 0;
 failed:
        iget_failed(sufile);
        return err;
}