- /*
- * Resizable, Scalable, Concurrent Hash Table
- *
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
- * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
- * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
- *
- * Code partially derived from nft_hash
- * Rewritten with rehash code from br_multicast plus single list
- * pointer as suggested by Josh Triplett
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
- #ifndef _LINUX_RHASHTABLE_H
- #define _LINUX_RHASHTABLE_H
- #include <linux/atomic.h>
- #include <linux/compiler.h>
- #include <linux/err.h>
- #include <linux/errno.h>
- #include <linux/jhash.h>
- #include <linux/list_nulls.h>
- #include <linux/workqueue.h>
- #include <linux/mutex.h>
- #include <linux/rcupdate.h>
- /*
- * The end of the chain is marked with a special nulls marker which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base | Hash |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- * Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit) : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
- */
- #define RHT_BASE_BITS 4
- #define RHT_HASH_BITS 27
- #define RHT_BASE_SHIFT RHT_HASH_BITS
- /* Base bits plus 1 bit for nulls marker */
- #define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
- struct rhash_head {
- struct rhash_head __rcu *next;
- };
- /**
- * struct bucket_table - Table of hash buckets
- * @size: Number of hash buckets
- * @rehash: Current bucket being rehashed
- * @hash_rnd: Random seed to fold into hash
- * @locks_mask: Mask to apply before accessing locks[]
- * @locks: Array of spinlocks protecting individual buckets
- * @walkers: List of active walkers
- * @rcu: RCU structure for freeing the table
- * @future_tbl: Table under construction during rehashing
- * @buckets: size * hash buckets
- */
- struct bucket_table {
- unsigned int size;
- unsigned int rehash;
- u32 hash_rnd;
- unsigned int locks_mask;
- spinlock_t *locks;
- struct list_head walkers;
- struct rcu_head rcu;
- struct bucket_table __rcu *future_tbl;
- struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
- };
- /**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
- struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
- };
- typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
- typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
- typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
- const void *obj);
- struct rhashtable;
- /**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @insecure_max_entries: Maximum number of entries (may be exceeded)
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @nulls_base: Base value to generate nulls marker
- * @insecure_elasticity: Set to true to disable chain length checks
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
- struct rhashtable_params {
- size_t nelem_hint;
- size_t key_len;
- size_t key_offset;
- size_t head_offset;
- unsigned int insecure_max_entries;
- unsigned int max_size;
- unsigned int min_size;
- u32 nulls_base;
- bool insecure_elasticity;
- bool automatic_shrinking;
- size_t locks_mul;
- rht_hashfn_t hashfn;
- rht_obj_hashfn_t obj_hashfn;
- rht_obj_cmpfn_t obj_cmpfn;
- };
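- /* Illustrative only, not part of this header: a typical parameter block
- * for a table keyed by a u32 id embedded in the object, in the style of
- * lib/test_rhashtable.c. The test_obj type and test_params name are
- * hypothetical; later examples below reuse them.
- */
- struct test_obj {
-         u32 id;
-         struct rhash_head node;
- };
- 
- static const struct rhashtable_params test_params = {
-         .key_len = sizeof(u32),
-         .key_offset = offsetof(struct test_obj, id),
-         .head_offset = offsetof(struct test_obj, node),
-         .automatic_shrinking = true,
- };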
- /**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @key_len: Key length for hashfn
- * @elasticity: Maximum chain length before rehash
- * @p: Configuration parameters
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- * @nelems: Number of elements in table
- */
- struct rhashtable {
- struct bucket_table __rcu *tbl;
- unsigned int key_len;
- unsigned int elasticity;
- struct rhashtable_params p;
- struct work_struct run_work;
- struct mutex mutex;
- spinlock_t lock;
- atomic_t nelems;
- };
- /**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
- struct rhashtable_walker {
- struct list_head list;
- struct bucket_table *tbl;
- };
- /**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
- * @ht: Table to iterate through
- * @p: Current pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
- struct rhashtable_iter {
- struct rhashtable *ht;
- struct rhash_head *p;
- struct rhashtable_walker *walker;
- unsigned int slot;
- unsigned int skip;
- };
- static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
- {
- return NULLS_MARKER(ht->p.nulls_base + hash);
- }
- #define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
- ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
- static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
- {
- return ((unsigned long) ptr & 1);
- }
- static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
- {
- return ((unsigned long) ptr) >> 1;
- }
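- /* Sketch, illustrative only: how the nulls helpers above round-trip.
- * Assumes a table initialised with a non-zero nulls_base such as
- * 1 << RHT_BASE_SHIFT.
- */
- static inline void rht_nulls_example(const struct rhashtable *ht)
- {
-         struct rhash_head *end = (struct rhash_head *)rht_marker(ht, 5);
- 
-         BUG_ON(!rht_is_a_nulls(end));   /* bit 0 is always set */
-         BUG_ON(rht_get_nulls_value(end) != ht->p.nulls_base + 5);
- }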
- static inline void *rht_obj(const struct rhashtable *ht,
- const struct rhash_head *he)
- {
- return (char *)he - ht->p.head_offset;
- }
- static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
- unsigned int hash)
- {
- return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
- }
- static inline unsigned int rht_key_hashfn(
- struct rhashtable *ht, const struct bucket_table *tbl,
- const void *key, const struct rhashtable_params params)
- {
- unsigned int hash;
- /* params must be equal to ht->p if it isn't constant. */
- if (!__builtin_constant_p(params.key_len))
- hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
- else if (params.key_len) {
- unsigned int key_len = params.key_len;
- if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
- else if (key_len & (sizeof(u32) - 1))
- hash = jhash(key, key_len, tbl->hash_rnd);
- else
- hash = jhash2(key, key_len / sizeof(u32),
- tbl->hash_rnd);
- } else {
- unsigned int key_len = ht->p.key_len;
- if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
- else
- hash = jhash(key, key_len, tbl->hash_rnd);
- }
- return rht_bucket_index(tbl, hash);
- }
- static inline unsigned int rht_head_hashfn(
- struct rhashtable *ht, const struct bucket_table *tbl,
- const struct rhash_head *he, const struct rhashtable_params params)
- {
- const char *ptr = rht_obj(ht, he);
- return likely(params.obj_hashfn) ?
- rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
- ht->p.key_len,
- tbl->hash_rnd)) :
- rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
- }
- /**
- * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
- * @ht: hash table
- * @tbl: current table
- */
- static inline bool rht_grow_above_75(const struct rhashtable *ht,
- const struct bucket_table *tbl)
- {
- /* Expand table when exceeding 75% load */
- return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
- (!ht->p.max_size || tbl->size < ht->p.max_size);
- }
- /**
- * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
- * @ht: hash table
- * @tbl: current table
- */
- static inline bool rht_shrink_below_30(const struct rhashtable *ht,
- const struct bucket_table *tbl)
- {
- /* Shrink table beneath 30% load */
- return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
- tbl->size > ht->p.min_size;
- }
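- /* Worked example for the two helpers above: with tbl->size == 1024 the
- * table is grown once nelems exceeds 768 (75% load) and, when
- * automatic_shrinking is enabled, shrunk once nelems falls below 307
- * (30% load).
- */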
- /**
- * rht_grow_above_100 - returns true if nelems > table-size
- * @ht: hash table
- * @tbl: current table
- */
- static inline bool rht_grow_above_100(const struct rhashtable *ht,
- const struct bucket_table *tbl)
- {
- return atomic_read(&ht->nelems) > tbl->size &&
- (!ht->p.max_size || tbl->size < ht->p.max_size);
- }
- /**
- * rht_grow_above_max - returns true if table is above maximum
- * @ht: hash table
- * @tbl: current table
- */
- static inline bool rht_grow_above_max(const struct rhashtable *ht,
- const struct bucket_table *tbl)
- {
- return ht->p.insecure_max_entries &&
- atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
- }
- /* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contain
- * entries linking to the same bucket of the old table during resizing.
- * This simplifies the locking, as locking the bucket in both tables
- * during a resize always guarantees protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
- static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
- unsigned int hash)
- {
- return &tbl->locks[hash & tbl->locks_mask];
- }
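- /* Sketch, illustrative only, of the ordering rule documented above when
- * both tables must be locked during a resize: the old table's bucket
- * lock is taken first and the new one nested under it, mirroring what
- * lib/rhashtable.c does while rehashing a chain.
- */
- static inline void rht_lock_both_example(struct bucket_table *old_tbl,
-                                          struct bucket_table *new_tbl,
-                                          unsigned int old_hash,
-                                          unsigned int new_hash)
- {
-         spinlock_t *old_lock = rht_bucket_lock(old_tbl, old_hash);
-         spinlock_t *new_lock = rht_bucket_lock(new_tbl, new_hash);
- 
-         spin_lock_bh(old_lock);         /* old table always first */
-         spin_lock_nested(new_lock, SINGLE_DEPTH_NESTING);
-         /* ... mutate the chains in both tables here ... */
-         spin_unlock(new_lock);
-         spin_unlock_bh(old_lock);
- }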
- #ifdef CONFIG_PROVE_LOCKING
- int lockdep_rht_mutex_is_held(struct rhashtable *ht);
- int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
- #else
- static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
- {
- return 1;
- }
- static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
- u32 hash)
- {
- return 1;
- }
- #endif /* CONFIG_PROVE_LOCKING */
- int rhashtable_init(struct rhashtable *ht,
- const struct rhashtable_params *params);
- struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl,
- void **data);
- int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
- int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
- void rhashtable_walk_exit(struct rhashtable_iter *iter);
- int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
- void *rhashtable_walk_next(struct rhashtable_iter *iter);
- void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
- void rhashtable_free_and_destroy(struct rhashtable *ht,
- void (*free_fn)(void *ptr, void *arg),
- void *arg);
- void rhashtable_destroy(struct rhashtable *ht);
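- /* Sketch, illustrative only: a full-table walk with the iterator API
- * declared above, reusing the hypothetical test_obj type from the
- * parameter example earlier. ERR_PTR(-EAGAIN) from the walker means a
- * resize interfered and entries may be seen more than once.
- */
- static inline void test_walk_example(struct rhashtable *ht)
- {
-         struct rhashtable_iter iter;
-         struct test_obj *obj;
- 
-         if (rhashtable_walk_init(ht, &iter))
-                 return;
- 
-         rhashtable_walk_start(&iter);
- 
-         while ((obj = rhashtable_walk_next(&iter)) != NULL) {
-                 if (IS_ERR(obj)) {
-                         if (PTR_ERR(obj) == -EAGAIN)
-                                 continue;       /* resize hit mid-walk */
-                         break;
-                 }
-                 /* use obj->id here */
-         }
- 
-         rhashtable_walk_stop(&iter);
-         rhashtable_walk_exit(&iter);
- }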
- #define rht_dereference(p, ht) \
- rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
- #define rht_dereference_rcu(p, ht) \
- rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
- #define rht_dereference_bucket(p, tbl, hash) \
- rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
- #define rht_dereference_bucket_rcu(p, tbl, hash) \
- rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
- #define rht_entry(tpos, pos, member) \
- ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
- /**
- * rht_for_each_continue - continue iterating over hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- */
- #define rht_for_each_continue(pos, head, tbl, hash) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
- !rht_is_a_nulls(pos); \
- pos = rht_dereference_bucket((pos)->next, tbl, hash))
- /**
- * rht_for_each - iterate over hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- */
- #define rht_for_each(pos, tbl, hash) \
- rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
- /**
- * rht_for_each_entry_continue - continue iterating over hash chain
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- */
- #define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = rht_dereference_bucket((pos)->next, tbl, hash))
- /**
- * rht_for_each_entry - iterate over hash chain of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- */
- #define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
- tbl, hash, member)
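- /* Illustrative use of rht_for_each_entry(), again with the hypothetical
- * test_obj type: the caller must hold the bucket lock or the table
- * mutex, as enforced by the lockdep checks above.
- */
- static inline u32 test_bucket_sum(struct bucket_table *tbl,
-                                   unsigned int hash)
- {
-         struct test_obj *obj;
-         struct rhash_head *pos;
-         u32 sum = 0;
- 
-         rht_for_each_entry(obj, pos, tbl, hash, node)
-                 sum += obj->id;
- 
-         return sum;
- }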
- /**
- * rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @next: the &struct rhash_head to use as next in loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- *
- * This hash chain list-traversal primitive allows for the looped code to
- * remove the loop cursor from the list.
- */
- #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = next, \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL)
- /**
- * rht_for_each_rcu_continue - continue iterating over rcu hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
- * traversal is guarded by rcu_read_lock().
- */
- #define rht_for_each_rcu_continue(pos, head, tbl, hash) \
- for (({barrier(); }), \
- pos = rht_dereference_bucket_rcu(head, tbl, hash); \
- !rht_is_a_nulls(pos); \
- pos = rcu_dereference_raw(pos->next))
- /**
- * rht_for_each_rcu - iterate over rcu hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
- * traversal is guarded by rcu_read_lock().
- */
- #define rht_for_each_rcu(pos, tbl, hash) \
- rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
- /**
- * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
- * traversal is guarded by rcu_read_lock().
- */
- #define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
- for (({barrier(); }), \
- pos = rht_dereference_bucket_rcu(head, tbl, hash); \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
- /**
- * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
- * traversal is guarded by rcu_read_lock().
- */
- #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
- tbl, hash, member)
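- /* Illustrative RCU-side counterpart of the bucket walk above: readers
- * only need rcu_read_lock(), no bucket lock. Hypothetical test_obj
- * type again.
- */
- static inline u32 test_bucket_sum_rcu(const struct bucket_table *tbl,
-                                       unsigned int hash)
- {
-         struct test_obj *obj;
-         struct rhash_head *pos;
-         u32 sum = 0;
- 
-         rcu_read_lock();
-         rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
-                 sum += obj->id;
-         rcu_read_unlock();
- 
-         return sum;
- }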
- static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
- const void *obj)
- {
- struct rhashtable *ht = arg->ht;
- const char *ptr = obj;
- return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
- }
- /* Internal function, do not use. */
- static inline struct rhash_head *__rhashtable_lookup(
- struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
- {
- struct rhashtable_compare_arg arg = {
- .ht = ht,
- .key = key,
- };
- const struct bucket_table *tbl;
- struct rhash_head *he;
- unsigned int hash;
- tbl = rht_dereference_rcu(ht->tbl, ht);
- restart:
- hash = rht_key_hashfn(ht, tbl, key, params);
- rht_for_each_rcu(he, tbl, hash) {
- if (params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, he)) :
- rhashtable_compare(&arg, rht_obj(ht, he)))
- continue;
- return he;
- }
- /* Ensure we see any new tables. */
- smp_rmb();
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(tbl))
- goto restart;
- return NULL;
- }
- /**
- * rhashtable_lookup - search hash table
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for an entry with an identical key. The first matching entry is returned.
- *
- * This must only be called under the RCU read lock.
- *
- * Returns the first entry on which the compare function returned true.
- */
- static inline void *rhashtable_lookup(
- struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
- {
- struct rhash_head *he = __rhashtable_lookup(ht, key, params);
- return he ? rht_obj(ht, he) : NULL;
- }
- /**
- * rhashtable_lookup_fast - search hash table, without RCU read lock
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for an entry with an identical key. The first matching entry is returned.
- *
- * Only use this function when you have other mechanisms guaranteeing
- * that the object won't go away after the RCU read lock is released.
- *
- * Returns the first entry on which the compare function returned true.
- */
- static inline void *rhashtable_lookup_fast(
- struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
- {
- void *obj;
- rcu_read_lock();
- obj = rhashtable_lookup(ht, key, params);
- rcu_read_unlock();
- return obj;
- }
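- /* Illustrative caller, using the hypothetical test_obj/test_params from
- * above: because params is a compile-time constant here, the hash and
- * compare paths inline down to jhash2()/memcmp().
- */
- static inline struct test_obj *test_lookup(struct rhashtable *ht, u32 id)
- {
-         return rhashtable_lookup_fast(ht, &id, test_params);
- }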
- /* Internal function, please use rhashtable_insert_fast() instead. This
- * function returns the existing element if there is a clash, NULL on
- * successful insertion, otherwise an error via ERR_PTR().
- */
- static inline void *__rhashtable_insert_fast(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
- const struct rhashtable_params params)
- {
- struct rhashtable_compare_arg arg = {
- .ht = ht,
- .key = key,
- };
- struct bucket_table *tbl, *new_tbl;
- struct rhash_head *head;
- spinlock_t *lock;
- unsigned int elasticity;
- unsigned int hash;
- void *data = NULL;
- int err;
- restart:
- rcu_read_lock();
- tbl = rht_dereference_rcu(ht->tbl, ht);
- /* All insertions must grab the oldest table containing
- * the hashed bucket that is yet to be rehashed.
- */
- for (;;) {
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
- if (tbl->rehash <= hash)
- break;
- spin_unlock_bh(lock);
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- }
- new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
- if (!IS_ERR_OR_NULL(tbl))
- goto slow_path;
- err = PTR_ERR(tbl);
- if (err == -EEXIST)
- err = 0;
- goto out;
- }
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
- if (unlikely(rht_grow_above_100(ht, tbl))) {
- slow_path:
- spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht, tbl);
- rcu_read_unlock();
- if (err)
- return ERR_PTR(err);
- goto restart;
- }
- err = 0;
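- /* Walk the chain: a key clash returns the existing object, while a
- * chain longer than ht->elasticity forces a rehash via the slow path.
- */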
- elasticity = ht->elasticity;
- rht_for_each(head, tbl, hash) {
- if (key &&
- unlikely(!(params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))) {
- data = rht_obj(ht, head);
- goto out;
- }
- if (!--elasticity)
- goto slow_path;
- }
- head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
- RCU_INIT_POINTER(obj->next, head);
- rcu_assign_pointer(tbl->buckets[hash], obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
- out:
- spin_unlock_bh(lock);
- rcu_read_unlock();
- return err ? ERR_PTR(err) : data;
- }
- /**
- * rhashtable_insert_fast - insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the table load
- * grows above 75%, as checked by rht_grow_above_75().
- */
- static inline int rhashtable_insert_fast(
- struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
- {
- void *ret;
- ret = __rhashtable_insert_fast(ht, NULL, obj, params);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- return ret == NULL ? 0 : -EEXIST;
- }
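- /* Illustrative caller with the hypothetical test_obj/test_params from
- * above: returns 0 on success or a negative errno such as -E2BIG when
- * the table is above its maximum.
- */
- static inline int test_insert(struct rhashtable *ht, struct test_obj *obj)
- {
-         return rhashtable_insert_fast(ht, &obj->node, test_params);
- }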
- /**
- * rhashtable_lookup_insert_fast - lookup and insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed-key hash tables
- * (key_len parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the table load
- * grows above 75%, as checked by rht_grow_above_75().
- */
- static inline int rhashtable_lookup_insert_fast(
- struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
- {
- const char *key = rht_obj(ht, obj);
- void *ret;
- BUG_ON(ht->p.obj_hashfn);
- ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- return ret == NULL ? 0 : -EEXIST;
- }
- /**
- * rhashtable_lookup_insert_key - search and insert object to hash table
- * with explicit key
- * @ht: hash table
- * @key: key
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the table load
- * grows above 75%, as checked by rht_grow_above_75().
- *
- * Returns zero on success.
- */
- static inline int rhashtable_lookup_insert_key(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
- const struct rhashtable_params params)
- {
- void *ret;
- BUG_ON(!ht->p.obj_hashfn || !key);
- ret = __rhashtable_insert_fast(ht, key, obj, params);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- return ret == NULL ? 0 : -EEXIST;
- }
- /**
- * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
- * @ht: hash table
- * @key: key to use for the lookup and insertion
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Just like rhashtable_lookup_insert_key(), but this function returns the
- * object if it exists, NULL if it does not and the insertion was successful,
- * and an ERR_PTR otherwise.
- */
- static inline void *rhashtable_lookup_get_insert_key(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
- const struct rhashtable_params params)
- {
- BUG_ON(!ht->p.obj_hashfn || !key);
- return __rhashtable_insert_fast(ht, key, obj, params);
- }
- /* Internal function, please use rhashtable_remove_fast() instead */
- static inline int __rhashtable_remove_fast(
- struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj, const struct rhashtable_params params)
- {
- struct rhash_head __rcu **pprev;
- struct rhash_head *he;
- spinlock_t *lock;
- unsigned int hash;
- int err = -ENOENT;
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
- if (he != obj) {
- pprev = &he->next;
- continue;
- }
- rcu_assign_pointer(*pprev, obj->next);
- err = 0;
- break;
- }
- spin_unlock_bh(lock);
- return err;
- }
- /**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is singly linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerably slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table if automatic_shrinking was enabled
- * in the parameters passed to rhashtable_init() and the table load drops
- * below 30%.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
- static inline int rhashtable_remove_fast(
- struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
- {
- struct bucket_table *tbl;
- int err;
- rcu_read_lock();
- tbl = rht_dereference_rcu(ht->tbl, ht);
- /* Because we have already taken (and released) the bucket
- * lock in old_tbl, if we find that future_tbl is not yet
- * visible then that guarantees that the entry is still in
- * the old tbl if it exists.
- */
- while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
- (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
- ;
- if (err)
- goto out;
- atomic_dec(&ht->nelems);
- if (unlikely(ht->p.automatic_shrinking &&
- rht_shrink_below_30(ht, tbl)))
- schedule_work(&ht->run_work);
- out:
- rcu_read_unlock();
- return err;
- }
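- /* Illustrative caller with the hypothetical test_obj/test_params from
- * above: returns 0 on success or -ENOENT if the object is not found.
- */
- static inline int test_remove(struct rhashtable *ht, struct test_obj *obj)
- {
-         return rhashtable_remove_fast(ht, &obj->node, test_params);
- }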
- #endif /* _LINUX_RHASHTABLE_H */