/* fs/jfs/xattr.c */
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Copyright (C) Christoph Hellwig, 2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/quotaops.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/xattr.h>

#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
#include "jfs_dinode.h"
#include "jfs_extent.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
/*
 *	jfs_xattr.c: extended attribute service
 *
 * Overall design --
 *
 * Format:
 *
 *   Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
 *   value) and a variable (0 or more) number of extended attribute
 *   entries.  Each extended attribute entry (jfs_ea) is a <name,value> double
 *   where <name> is constructed from a null-terminated ascii string
 *   (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
 *   (1 ... 65535 bytes).  The in-memory format is
 *
 *   0       1        2        4                4 + namelen + 1
 *   +-------+--------+--------+----------------+-------------------+
 *   | Flags | Name   | Value  | Name String \0 | Data . . . .      |
 *   |       | Length | Length |                |                   |
 *   +-------+--------+--------+----------------+-------------------+
 *
 *   A jfs_ea_list then is structured as
 *
 *   0            4                   4 + EA_SIZE(ea1)
 *   +------------+-------------------+--------------------+-----
 *   | Overall EA | First FEA Element | Second FEA Element | .....
 *   | List Size  |                   |                    |
 *   +------------+-------------------+--------------------+-----
 *
 * On-disk:
 *
 *   FEALISTs are stored on disk using blocks allocated by dbAlloc() and
 *   written directly.  An EA list may be in-lined in the inode if there is
 *   sufficient room available.
 */
/*
 * In-memory handle for an inode's EA list while it is being read or
 * modified.  The list may live in the inode's in-line area, in a
 * metapage backed by an on-disk extent, or in a kmalloc'd buffer;
 * the flag field records which, so ea_release/ea_put know how to
 * dispose of it.
 */
struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001	/* xattr points at the inode in-line area */
#define EA_EXTENT	0x0002	/* xattr points into a metapage (mp is valid) */
#define EA_NEW		0x0004	/* backing storage was freshly allocated */
#define EA_MALLOC	0x0008	/* xattr is a kmalloc'd buffer */
  83. static int is_known_namespace(const char *name)
  84. {
  85. if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
  86. strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
  87. strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
  88. strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
  89. return false;
  90. return true;
  91. }
  92. /*
  93. * These three routines are used to recognize on-disk extended attributes
  94. * that are in a recognized namespace. If the attribute is not recognized,
  95. * "os2." is prepended to the name
  96. */
  97. static int is_os2_xattr(struct jfs_ea *ea)
  98. {
  99. return !is_known_namespace(ea->name);
  100. }
  101. static inline int name_size(struct jfs_ea *ea)
  102. {
  103. if (is_os2_xattr(ea))
  104. return ea->namelen + XATTR_OS2_PREFIX_LEN;
  105. else
  106. return ea->namelen;
  107. }
  108. static inline int copy_name(char *buffer, struct jfs_ea *ea)
  109. {
  110. int len = ea->namelen;
  111. if (is_os2_xattr(ea)) {
  112. memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
  113. buffer += XATTR_OS2_PREFIX_LEN;
  114. len += XATTR_OS2_PREFIX_LEN;
  115. }
  116. memcpy(buffer, ea->name, ea->namelen);
  117. buffer[ea->namelen] = 0;
  118. return len;
  119. }
/* Forward references */
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);

/*
 * NAME:	ea_write_inline
 *
 * FUNCTION:	Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *	Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in with necessary EA information
 *		  if we successfully copy the EA inline
 *
 * NOTES:
 *	Checks if the inode's inline area is available.  If so, copies EA inline
 *	and sets <ea> fields appropriately.  Otherwise, returns failure, EA will
 *	have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		/* the in-line area is now occupied by this EA */
		ji->mode2 &= ~INLINEEA;
	} else {
		/* empty/absent EA: record a null dxd */
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}
/*
 * NAME:	ea_write
 *
 * FUNCTION:	Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
		/* in-line area unavailable: fall through to the extent path */
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	rc = dquot_alloc_block(ip, nblocks);
	if (rc)
		return rc;

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/*Rollback quota allocation. */
		dquot_free_block(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	/* describe the new extent in the caller's dxd */
	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	dquot_free_block(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}
/*
 * NAME:	ea_read_inline
 *
 * FUNCTION:	Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * RETURNS: 0 on success; -EIO if the in-line data fails sanity checks
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	/* the list's own size field must agree with the dxd descriptor */
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}
/*
 * NAME:	ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES:  If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "nbytes is 0\n");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * I have found the disk blocks which were originally used to store
	 * the FEALIST.  now i loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}
  376. /*
  377. * NAME: ea_get
  378. *
  379. * FUNCTION: Returns buffer containing existing extended attributes.
  380. * The size of the buffer will be the larger of the existing
  381. * attributes size, or min_size.
  382. *
  383. * The buffer, which may be inlined in the inode or in the
  384. * page cache must be release by calling ea_release or ea_put
  385. *
  386. * PARAMETERS:
  387. * inode - Inode pointer
  388. * ea_buf - Structure to be populated with ealist and its metadata
  389. * min_size- minimum size of buffer to be returned
  390. *
  391. * RETURNS: 0 for success; Other indicates failure
  392. */
  393. static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
  394. {
  395. struct jfs_inode_info *ji = JFS_IP(inode);
  396. struct super_block *sb = inode->i_sb;
  397. int size;
  398. int ea_size = sizeDXD(&ji->ea);
  399. int blocks_needed, current_blocks;
  400. s64 blkno;
  401. int rc;
  402. int quota_allocation = 0;
  403. /* When fsck.jfs clears a bad ea, it doesn't clear the size */
  404. if (ji->ea.flag == 0)
  405. ea_size = 0;
  406. if (ea_size == 0) {
  407. if (min_size == 0) {
  408. ea_buf->flag = 0;
  409. ea_buf->max_size = 0;
  410. ea_buf->xattr = NULL;
  411. return 0;
  412. }
  413. if ((min_size <= sizeof (ji->i_inline_ea)) &&
  414. (ji->mode2 & INLINEEA)) {
  415. ea_buf->flag = EA_INLINE | EA_NEW;
  416. ea_buf->max_size = sizeof (ji->i_inline_ea);
  417. ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
  418. DXDlength(&ea_buf->new_ea, 0);
  419. DXDaddress(&ea_buf->new_ea, 0);
  420. ea_buf->new_ea.flag = DXD_INLINE;
  421. DXDsize(&ea_buf->new_ea, min_size);
  422. return 0;
  423. }
  424. current_blocks = 0;
  425. } else if (ji->ea.flag & DXD_INLINE) {
  426. if (min_size <= sizeof (ji->i_inline_ea)) {
  427. ea_buf->flag = EA_INLINE;
  428. ea_buf->max_size = sizeof (ji->i_inline_ea);
  429. ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
  430. goto size_check;
  431. }
  432. current_blocks = 0;
  433. } else {
  434. if (!(ji->ea.flag & DXD_EXTENT)) {
  435. jfs_error(sb, "invalid ea.flag\n");
  436. return -EIO;
  437. }
  438. current_blocks = (ea_size + sb->s_blocksize - 1) >>
  439. sb->s_blocksize_bits;
  440. }
  441. size = max(min_size, ea_size);
  442. if (size > PSIZE) {
  443. /*
  444. * To keep the rest of the code simple. Allocate a
  445. * contiguous buffer to work with. Make the buffer large
  446. * enough to make use of the whole extent.
  447. */
  448. ea_buf->max_size = (size + sb->s_blocksize - 1) &
  449. ~(sb->s_blocksize - 1);
  450. ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
  451. if (ea_buf->xattr == NULL)
  452. return -ENOMEM;
  453. ea_buf->flag = EA_MALLOC;
  454. if (ea_size == 0)
  455. return 0;
  456. if ((rc = ea_read(inode, ea_buf->xattr))) {
  457. kfree(ea_buf->xattr);
  458. ea_buf->xattr = NULL;
  459. return rc;
  460. }
  461. goto size_check;
  462. }
  463. blocks_needed = (min_size + sb->s_blocksize - 1) >>
  464. sb->s_blocksize_bits;
  465. if (blocks_needed > current_blocks) {
  466. /* Allocate new blocks to quota. */
  467. rc = dquot_alloc_block(inode, blocks_needed);
  468. if (rc)
  469. return -EDQUOT;
  470. quota_allocation = blocks_needed;
  471. rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
  472. &blkno);
  473. if (rc)
  474. goto clean_up;
  475. DXDlength(&ea_buf->new_ea, blocks_needed);
  476. DXDaddress(&ea_buf->new_ea, blkno);
  477. ea_buf->new_ea.flag = DXD_EXTENT;
  478. DXDsize(&ea_buf->new_ea, min_size);
  479. ea_buf->flag = EA_EXTENT | EA_NEW;
  480. ea_buf->mp = get_metapage(inode, blkno,
  481. blocks_needed << sb->s_blocksize_bits,
  482. 1);
  483. if (ea_buf->mp == NULL) {
  484. dbFree(inode, blkno, (s64) blocks_needed);
  485. rc = -EIO;
  486. goto clean_up;
  487. }
  488. ea_buf->xattr = ea_buf->mp->data;
  489. ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
  490. ~(sb->s_blocksize - 1);
  491. if (ea_size == 0)
  492. return 0;
  493. if ((rc = ea_read(inode, ea_buf->xattr))) {
  494. discard_metapage(ea_buf->mp);
  495. dbFree(inode, blkno, (s64) blocks_needed);
  496. goto clean_up;
  497. }
  498. goto size_check;
  499. }
  500. ea_buf->flag = EA_EXTENT;
  501. ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
  502. lengthDXD(&ji->ea) << sb->s_blocksize_bits,
  503. 1);
  504. if (ea_buf->mp == NULL) {
  505. rc = -EIO;
  506. goto clean_up;
  507. }
  508. ea_buf->xattr = ea_buf->mp->data;
  509. ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
  510. ~(sb->s_blocksize - 1);
  511. size_check:
  512. if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
  513. printk(KERN_ERR "ea_get: invalid extended attribute\n");
  514. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
  515. ea_buf->xattr, ea_size, 1);
  516. ea_release(inode, ea_buf);
  517. rc = -EIO;
  518. goto clean_up;
  519. }
  520. return ea_size;
  521. clean_up:
  522. /* Rollback quota allocation */
  523. if (quota_allocation)
  524. dquot_free_block(inode, quota_allocation);
  525. return (rc);
  526. }
  527. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
  528. {
  529. if (ea_buf->flag & EA_MALLOC)
  530. kfree(ea_buf->xattr);
  531. else if (ea_buf->flag & EA_EXTENT) {
  532. assert(ea_buf->mp);
  533. release_metapage(ea_buf->mp);
  534. if (ea_buf->flag & EA_NEW)
  535. dbFree(inode, addressDXD(&ea_buf->new_ea),
  536. lengthDXD(&ea_buf->new_ea));
  537. }
  538. }
/*
 * NAME:	ea_put
 *
 * FUNCTION:	Commit the (possibly modified) EA list held in <ea_buf> back
 *		to the inode, logging the dxd change under transaction <tid>
 *		and releasing quota for any old extent blocks.
 *
 * PARAMETERS:
 *	tid	- transaction id
 *	inode	- Inode pointer
 *	ea_buf	- buffer describing the new EA list
 *	new_size- size of the new list in bytes; 0 removes the EA entirely
 *
 * RETURNS: 0 for success; negative errno on failure
 */
static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
		  int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;

	if (new_size == 0) {
		/* nothing to keep: release whatever backing store we held */
		ea_release(inode, ea_buf);
		ea_buf = NULL;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		/* in-line area is consumed by the EA */
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			/*
			 * NOTE(review): new_blocks is recorded but never
			 * read afterwards in this function -- confirm
			 * whether it was meant to feed a quota adjustment.
			 */
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		txEA(tid, inode, &ji->ea, NULL);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* If old blocks exist, they must be removed from quota allocation. */
	if (old_blocks)
		dquot_free_block(inode, old_blocks);

	inode->i_ctime = CURRENT_TIME;

	return 0;
}
  594. /*
  595. * Most of the permission checking is done by xattr_permission in the vfs.
  596. * We also need to verify that this is a namespace that we recognize.
  597. */
  598. static int can_set_xattr(struct inode *inode, const char *name,
  599. const void *value, size_t value_len)
  600. {
  601. if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
  602. /*
  603. * This makes sure that we aren't trying to set an
  604. * attribute in a different namespace by prefixing it
  605. * with "os2."
  606. */
  607. if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
  608. return -EOPNOTSUPP;
  609. return 0;
  610. }
  611. /*
  612. * Don't allow setting an attribute in an unknown namespace.
  613. */
  614. if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
  615. strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
  616. strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
  617. return -EOPNOTSUPP;
  618. return 0;
  619. }
/*
 * NAME:	__jfs_setxattr
 *
 * FUNCTION:	Create, replace or (value == NULL) remove the named extended
 *		attribute on <inode>, inside transaction <tid>.  An "os2."
 *		prefix on <name> is stripped before the on-disk lookup.
 *
 * PARAMETERS:
 *	tid	- transaction id the dxd change is logged under
 *	inode	- Inode pointer
 *	name	- attribute name
 *	value	- attribute value, or NULL to remove the attribute
 *	value_len - length of <value> in bytes
 *	flags	- XATTR_CREATE and/or XATTR_REPLACE semantics
 *
 * RETURNS: 0 for success; negative errno on failure
 */
int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
		   const void *value, size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int found = 0;
	int rc;
	int length;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		/* store the name without the "os2." prefix */
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	/* scan the existing list for a name match, summing kept entries */
	if (xattr_size) {
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		if (value == NULL) {
			/* removing a nonexistent attribute is a no-op */
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for merged ea list.
		 * We should only have loop to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		/*
		 * The size of EA value is limited by on-disk format up to
		 * __le16, there would be an overflow if the size is equal
		 * to XATTR_SIZE_MAX (65536).  In order to avoid this issue,
		 * we can pre-checkup the value size against USHRT_MAX, and
		 * return -E2BIG in this case, which is consistent with the
		 * VFS setxattr interface.
		 */
		if (value_len >= USHRT_MAX) {
			rc = -E2BIG;
			goto release;
		}

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these number match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);
		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(tid, inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	kfree(os2name);

	return rc;
}
/*
 * VFS entry point for setting an extended attribute.  Wraps
 * __jfs_setxattr in its own transaction and commits it on success.
 */
int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
		 size_t value_len, int flags)
{
	struct inode *inode = d_inode(dentry);
	struct jfs_inode_info *ji = JFS_IP(inode);
	int rc;
	tid_t tid;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, value_len, flags);

	if ((rc = can_set_xattr(inode, name, value, value_len)))
		return rc;

	if (value == NULL) {	/* empty EA, do not remove */
		value = "";
		value_len = 0;
	}

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, d_inode(dentry), name, value, value_len,
			    flags);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}
/*
 * NAME:	__jfs_getxattr
 *
 * FUNCTION:	Look up the named attribute and copy its value into <data>.
 *
 * PARAMETERS:
 *	inode	- Inode pointer
 *	name	- attribute name (caller has already stripped any "os2.")
 *	data	- destination buffer, or NULL to query the value's size
 *	buf_size- size of <data> in bytes
 *
 * RETURNS: value size on success; -ENODATA if the attribute does not
 *	exist; -ERANGE if <data> is too small; other negative errno on
 *	failure
 */
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *value;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);

	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			/* value bytes follow the NUL-terminated name */
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	return size;
}
  825. ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
  826. size_t buf_size)
  827. {
  828. int err;
  829. /*
  830. * If this is a request for a synthetic attribute in the system.*
  831. * namespace use the generic infrastructure to resolve a handler
  832. * for it via sb->s_xattr.
  833. */
  834. if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
  835. return generic_getxattr(dentry, name, data, buf_size);
  836. if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
  837. /*
  838. * skip past "os2." prefix
  839. */
  840. name += XATTR_OS2_PREFIX_LEN;
  841. /*
  842. * Don't allow retrieving properly prefixed attributes
  843. * by prepending them with "os2."
  844. */
  845. if (is_known_namespace(name))
  846. return -EOPNOTSUPP;
  847. }
  848. err = __jfs_getxattr(d_inode(dentry), name, data, buf_size);
  849. return err;
  850. }
  851. /*
  852. * No special permissions are needed to list attributes except for trusted.*
  853. */
  854. static inline int can_list(struct jfs_ea *ea)
  855. {
  856. return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
  857. XATTR_TRUSTED_PREFIX_LEN) ||
  858. capable(CAP_SYS_ADMIN));
  859. }
/*
 * NAME:	jfs_listxattr
 *
 * FUNCTION:	Concatenate the NUL-terminated names of all listable
 *		attributes into <data>; when data == NULL, only report the
 *		space required.
 *
 * RETURNS: total size of the name list; -ERANGE if <data> is too small;
 *	other negative errno on failure
 */
ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = d_inode(dentry);
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			size += name_size(ea) + 1;
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);
	return size;
}
/*
 * VFS entry point for removing an extended attribute.  Implemented as
 * __jfs_setxattr(..., NULL, 0, XATTR_REPLACE) inside its own transaction.
 */
int jfs_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = d_inode(dentry);
	struct jfs_inode_info *ji = JFS_IP(inode);
	int rc;
	tid_t tid;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	if ((rc = can_set_xattr(inode, name, NULL, 0)))
		return rc;

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, d_inode(dentry), name, NULL, 0, XATTR_REPLACE);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}
/*
 * List of handlers for synthetic system.* attributes.  All real ondisk
 * attributes are handled directly.
 */
const struct xattr_handler *jfs_xattr_handlers[] = {
#ifdef CONFIG_JFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	NULL,
};
  938. #ifdef CONFIG_JFS_SECURITY
  939. static int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
  940. void *fs_info)
  941. {
  942. const struct xattr *xattr;
  943. tid_t *tid = fs_info;
  944. char *name;
  945. int err = 0;
  946. for (xattr = xattr_array; xattr->name != NULL; xattr++) {
  947. name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
  948. strlen(xattr->name) + 1, GFP_NOFS);
  949. if (!name) {
  950. err = -ENOMEM;
  951. break;
  952. }
  953. strcpy(name, XATTR_SECURITY_PREFIX);
  954. strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
  955. err = __jfs_setxattr(*tid, inode, name,
  956. xattr->value, xattr->value_len, 0);
  957. kfree(name);
  958. if (err < 0)
  959. break;
  960. }
  961. return err;
  962. }
/*
 * Attach LSM-provided security labels to a newly created inode as
 * security.* xattrs, written inside transaction <tid>.
 */
int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
		      const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &jfs_initxattrs, &tid);
}
#endif