  1. /*
  2. * Implementation of the SID table type.
  3. *
  4. * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/slab.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/errno.h>
  10. #include "flask.h"
  11. #include "security.h"
  12. #include "sidtab.h"
  13. #define SIDTAB_HASH(sid) \
  14. (sid & SIDTAB_HASH_MASK)
  15. int sidtab_init(struct sidtab *s)
  16. {
  17. int i;
  18. s->htable = kmalloc(sizeof(*(s->htable)) * SIDTAB_SIZE, GFP_ATOMIC);
  19. if (!s->htable)
  20. return -ENOMEM;
  21. for (i = 0; i < SIDTAB_SIZE; i++)
  22. s->htable[i] = NULL;
  23. s->nel = 0;
  24. s->next_sid = 1;
  25. s->shutdown = 0;
  26. spin_lock_init(&s->lock);
  27. return 0;
  28. }
/*
 * Insert a (@sid -> @context) mapping into the table @s.
 *
 * The context is deep-copied via context_cpy(), so the caller retains
 * ownership of @context.  Each bucket chain is kept sorted by ascending
 * SID.  The write barriers allow lockless readers (sidtab_search_core)
 * to walk a chain safely: a node is fully initialized before it becomes
 * reachable.
 *
 * Returns 0 on success, -EEXIST if @sid is already present, or -ENOMEM
 * on a NULL table, allocation failure, or context-copy failure.
 */
int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
{
	int hvalue, rc = 0;
	struct sidtab_node *prev, *cur, *newnode;

	if (!s) {
		/* NOTE(review): -ENOMEM for a NULL table is surprising
		 * (-EINVAL would be clearer), but callers may depend on
		 * this value, so it is kept as-is. */
		rc = -ENOMEM;
		goto out;
	}
	/* Find the insertion point in the SID-sorted bucket chain. */
	hvalue = SIDTAB_HASH(sid);
	prev = NULL;
	cur = s->htable[hvalue];
	while (cur && sid > cur->sid) {
		prev = cur;
		cur = cur->next;
	}
	if (cur && sid == cur->sid) {
		rc = -EEXIST;
		goto out;
	}
	newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
	if (newnode == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	newnode->sid = sid;
	if (context_cpy(&newnode->context, context)) {
		kfree(newnode);
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * Publish the node: set its next pointer first, then issue a
	 * write barrier so the fully-formed node is visible before it
	 * is linked into a chain that lockless readers may be walking.
	 */
	if (prev) {
		newnode->next = prev->next;
		wmb();
		prev->next = newnode;
	} else {
		newnode->next = s->htable[hvalue];
		wmb();
		s->htable[hvalue] = newnode;
	}
	s->nel++;
	/* Keep next_sid ahead of any explicitly inserted SID. */
	if (sid >= s->next_sid)
		s->next_sid = sid + 1;
out:
	return rc;
}
  74. static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
  75. {
  76. int hvalue;
  77. struct sidtab_node *cur;
  78. if (!s)
  79. return NULL;
  80. hvalue = SIDTAB_HASH(sid);
  81. cur = s->htable[hvalue];
  82. while (cur && sid > cur->sid)
  83. cur = cur->next;
  84. if (force && cur && sid == cur->sid && cur->context.len)
  85. return &cur->context;
  86. if (cur == NULL || sid != cur->sid || cur->context.len) {
  87. /* Remap invalid SIDs to the unlabeled SID. */
  88. sid = SECINITSID_UNLABELED;
  89. hvalue = SIDTAB_HASH(sid);
  90. cur = s->htable[hvalue];
  91. while (cur && sid > cur->sid)
  92. cur = cur->next;
  93. if (!cur || sid != cur->sid)
  94. return NULL;
  95. }
  96. return &cur->context;
  97. }
  98. struct context *sidtab_search(struct sidtab *s, u32 sid)
  99. {
  100. return sidtab_search_core(s, sid, 0);
  101. }
  102. struct context *sidtab_search_force(struct sidtab *s, u32 sid)
  103. {
  104. return sidtab_search_core(s, sid, 1);
  105. }
  106. int sidtab_map(struct sidtab *s,
  107. int (*apply) (u32 sid,
  108. struct context *context,
  109. void *args),
  110. void *args)
  111. {
  112. int i, rc = 0;
  113. struct sidtab_node *cur;
  114. if (!s)
  115. goto out;
  116. for (i = 0; i < SIDTAB_SIZE; i++) {
  117. cur = s->htable[i];
  118. while (cur) {
  119. rc = apply(cur->sid, &cur->context, args);
  120. if (rc)
  121. goto out;
  122. cur = cur->next;
  123. }
  124. }
  125. out:
  126. return rc;
  127. }
  128. static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
  129. {
  130. BUG_ON(loc >= SIDTAB_CACHE_LEN);
  131. while (loc > 0) {
  132. s->cache[loc] = s->cache[loc - 1];
  133. loc--;
  134. }
  135. s->cache[0] = n;
  136. }
  137. static inline u32 sidtab_search_context(struct sidtab *s,
  138. struct context *context)
  139. {
  140. int i;
  141. struct sidtab_node *cur;
  142. for (i = 0; i < SIDTAB_SIZE; i++) {
  143. cur = s->htable[i];
  144. while (cur) {
  145. if (context_cmp(&cur->context, context)) {
  146. sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
  147. return cur->sid;
  148. }
  149. cur = cur->next;
  150. }
  151. }
  152. return 0;
  153. }
  154. static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
  155. {
  156. int i;
  157. struct sidtab_node *node;
  158. for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
  159. node = s->cache[i];
  160. if (unlikely(!node))
  161. return 0;
  162. if (context_cmp(&node->context, context)) {
  163. sidtab_update_cache(s, node, i);
  164. return node->sid;
  165. }
  166. }
  167. return 0;
  168. }
/*
 * Map @context to a SID, allocating a new SID if this context has not
 * been seen before.  The result is stored in *@out_sid (SECSID_NULL on
 * failure).
 *
 * Fast path: a lockless probe of the MRU cache, then of the full table.
 * Slow path: take s->lock, rescan (another CPU may have inserted the
 * same context meanwhile), then allocate s->next_sid and insert.
 *
 * Returns 0 on success, or -ENOMEM if SIDs are exhausted, the table has
 * been shut down, or the insert fails.
 */
int sidtab_context_to_sid(struct sidtab *s,
			  struct context *context,
			  u32 *out_sid)
{
	u32 sid;
	int ret = 0;
	unsigned long flags;

	*out_sid = SECSID_NULL;

	/* Lockless fast path: cache first, then full table scan. */
	sid = sidtab_search_cache(s, context);
	if (!sid)
		sid = sidtab_search_context(s, context);
	if (!sid) {
		spin_lock_irqsave(&s->lock, flags);
		/* Rescan now that we hold the lock. */
		sid = sidtab_search_context(s, context);
		if (sid)
			goto unlock_out;
		/* No SID exists for the context.  Allocate a new one. */
		if (s->next_sid == UINT_MAX || s->shutdown) {
			ret = -ENOMEM;
			goto unlock_out;
		}
		sid = s->next_sid++;
		if (context->len)
			/* context->str holds the raw, unvalidated string. */
			printk(KERN_INFO
			       "SELinux: Context %s is not valid (left unmapped).\n",
			       context->str);
		ret = sidtab_insert(s, sid, context);
		if (ret)
			/* Roll back the SID allocation on insert failure. */
			s->next_sid--;
unlock_out:
		spin_unlock_irqrestore(&s->lock, flags);
	}
	if (ret)
		return ret;
	*out_sid = sid;
	return 0;
}
  207. void sidtab_hash_eval(struct sidtab *h, char *tag)
  208. {
  209. int i, chain_len, slots_used, max_chain_len;
  210. struct sidtab_node *cur;
  211. slots_used = 0;
  212. max_chain_len = 0;
  213. for (i = 0; i < SIDTAB_SIZE; i++) {
  214. cur = h->htable[i];
  215. if (cur) {
  216. slots_used++;
  217. chain_len = 0;
  218. while (cur) {
  219. chain_len++;
  220. cur = cur->next;
  221. }
  222. if (chain_len > max_chain_len)
  223. max_chain_len = chain_len;
  224. }
  225. }
  226. printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
  227. "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
  228. max_chain_len);
  229. }
  230. void sidtab_destroy(struct sidtab *s)
  231. {
  232. int i;
  233. struct sidtab_node *cur, *temp;
  234. if (!s)
  235. return;
  236. for (i = 0; i < SIDTAB_SIZE; i++) {
  237. cur = s->htable[i];
  238. while (cur) {
  239. temp = cur;
  240. cur = cur->next;
  241. context_destroy(&temp->context);
  242. kfree(temp);
  243. }
  244. s->htable[i] = NULL;
  245. }
  246. kfree(s->htable);
  247. s->htable = NULL;
  248. s->nel = 0;
  249. s->next_sid = 1;
  250. }
  251. void sidtab_set(struct sidtab *dst, struct sidtab *src)
  252. {
  253. unsigned long flags;
  254. int i;
  255. spin_lock_irqsave(&src->lock, flags);
  256. dst->htable = src->htable;
  257. dst->nel = src->nel;
  258. dst->next_sid = src->next_sid;
  259. dst->shutdown = 0;
  260. for (i = 0; i < SIDTAB_CACHE_LEN; i++)
  261. dst->cache[i] = NULL;
  262. spin_unlock_irqrestore(&src->lock, flags);
  263. }
  264. void sidtab_shutdown(struct sidtab *s)
  265. {
  266. unsigned long flags;
  267. spin_lock_irqsave(&s->lock, flags);
  268. s->shutdown = 1;
  269. spin_unlock_irqrestore(&s->lock, flags);
  270. }