ESAPI-C 1.0
The OWASP Enterprise Security API for C
uthash.h
#ifndef UTHASH_H
#define UTHASH_H

#include <string.h>   /* memcmp,strlen */
#include <stddef.h>   /* ptrdiff_t */

/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling c++ source) this code uses whatever method is needed
   or, for VS2008 where neither is available, uses casting workarounds. */
#ifdef _MSC_VER                                /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus)   /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else                                          /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#else                                          /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif

#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif

/* a number of the hash functions use uint32_t, which isn't defined on win32 */
#ifdef _MSC_VER
typedef unsigned int uint32_t;
#else
#include <inttypes.h>   /* uint32_t */
#endif

#define UTHASH_VERSION 1.9.3

#define uthash_fatal(msg) exit(-1)    /* fatal error (out of memory, etc.) */
#define uthash_malloc(sz) malloc(sz)  /* malloc fcn */
#define uthash_free(ptr,sz) free(ptr) /* free fcn */

#define uthash_noexpand_fyi(tbl)      /* can be defined to log noexpand */
#define uthash_expand_fyi(tbl)        /* can be defined to log expands */

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32      /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5  /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10      /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))

#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_bkt,_hf_hashv; \
  out=NULL; \
  if (head) { \
     HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
     if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
       HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
                        keyptr,keylen,out); \
     } \
  } \
} while (0)

#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0)
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0);

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0);

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#endif

#define HASH_MAKE_TABLE(hh,head) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
                  sizeof(UT_hash_table)); \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
  (head)->hh.tbl->tail = &((head)->hh); \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl->buckets, 0, \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_MAKE((head)->hh.tbl); \
  (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)

#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
        HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add)

#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
 unsigned _ha_bkt; \
 (add)->hh.next = NULL; \
 (add)->hh.key = (char*)keyptr; \
 (add)->hh.keylen = keylen_in; \
 if (!(head)) { \
    head = (add); \
    (head)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh,head); \
 } else { \
    (head)->hh.tbl->tail->next = (add); \
    (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
    (head)->hh.tbl->tail = &((add)->hh); \
 } \
 (head)->hh.tbl->num_items++; \
 (add)->hh.tbl = (head)->hh.tbl; \
 HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
         (add)->hh.hashv, _ha_bkt); \
 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
 HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
 HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
 HASH_FSCK(hh,head); \
} while(0)

#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
  bkt = ((hashv) & ((num_bkts) - 1)); \
} while(0)

/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 *   HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr) \
do { \
    unsigned _hd_bkt; \
    struct UT_hash_handle *_hd_hh_del; \
    if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
        uthash_free((head)->hh.tbl->buckets, \
                    (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
        HASH_BLOOM_FREE((head)->hh.tbl); \
        uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
        head = NULL; \
    } else { \
        _hd_hh_del = &((delptr)->hh); \
        if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
            (head)->hh.tbl->tail = \
                (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
                (head)->hh.tbl->hho); \
        } \
        if ((delptr)->hh.prev) { \
            ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
                    (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
        } else { \
            DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
        } \
        if (_hd_hh_del->next) { \
            ((UT_hash_handle*)((char*)_hd_hh_del->next + \
                    (head)->hh.tbl->hho))->prev = \
                    _hd_hh_del->prev; \
        } \
        HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
        HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
        (head)->hh.tbl->num_items--; \
    } \
    HASH_FSCK(hh,head); \
} while (0)


/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
#define HASH_FIND_STR(head,findstr,out) \
    HASH_FIND(hh,head,findstr,strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
    HASH_ADD(hh,head,strfield,strlen(add->strfield),add)
#define HASH_FIND_INT(head,findint,out) \
    HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
    HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_FIND_PTR(head,findptr,out) \
    HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
    HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_DEL(head,delptr) \
    HASH_DELETE(hh,head,delptr)

/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for the uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head) \
do { \
    unsigned _bkt_i; \
    unsigned _count, _bkt_count; \
    char *_prev; \
    struct UT_hash_handle *_thh; \
    if (head) { \
        _count = 0; \
        for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
            _bkt_count = 0; \
            _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
            _prev = NULL; \
            while (_thh) { \
               if (_prev != (char*)(_thh->hh_prev)) { \
                   HASH_OOPS("invalid hh_prev %p, actual %p\n", \
                    _thh->hh_prev, _prev ); \
               } \
               _bkt_count++; \
               _prev = (char*)(_thh); \
               _thh = _thh->hh_next; \
            } \
            _count += _bkt_count; \
            if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
               HASH_OOPS("invalid bucket count %d, actual %d\n", \
                (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
            } \
        } \
        if (_count != (head)->hh.tbl->num_items) { \
            HASH_OOPS("invalid hh item count %d, actual %d\n", \
                (head)->hh.tbl->num_items, _count ); \
        } \
        /* traverse hh in app order; check next/prev integrity, count */ \
        _count = 0; \
        _prev = NULL; \
        _thh = &(head)->hh; \
        while (_thh) { \
           _count++; \
           if (_prev !=(char*)(_thh->prev)) { \
              HASH_OOPS("invalid prev %p, actual %p\n", \
                    _thh->prev, _prev ); \
           } \
           _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
           _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
                                 (head)->hh.tbl->hho) : NULL ); \
        } \
        if (_count != (head)->hh.tbl->num_items) { \
            HASH_OOPS("invalid app item count %d, actual %d\n", \
                (head)->hh.tbl->num_items, _count ); \
        } \
    } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif

/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined, for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
    unsigned _klen = fieldlen; \
    write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
    write(HASH_EMIT_KEYS, keyptr, fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif

/* default to the Jenkins hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif

/* The Bernstein hash function, used in Perl prior to v5.6 */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hb_keylen=keylen; \
  char *_hb_key=(char*)(key); \
  (hashv) = 0; \
  while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \
  bkt = (hashv) & (num_bkts-1); \
} while (0)


/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _sx_i; \
  char *_hs_key=(char*)(key); \
  hashv = 0; \
  for(_sx_i=0; _sx_i < keylen; _sx_i++) \
      hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
  bkt = hashv & (num_bkts-1); \
} while (0)

#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _fn_i; \
  char *_hf_key=(char*)(key); \
  hashv = 2166136261UL; \
  for(_fn_i=0; _fn_i < keylen; _fn_i++) \
      hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
  bkt = hashv & (num_bkts-1); \
} while(0);

#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _ho_i; \
  char *_ho_key=(char*)(key); \
  hashv = 0; \
  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
      hashv += _ho_key[_ho_i]; \
      hashv += (hashv << 10); \
      hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
  bkt = hashv & (num_bkts-1); \
} while(0)

#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)

#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  char *_hj_key=(char*)(key); \
  hashv = 0xfeedbeef; \
  _hj_i = _hj_j = 0x9e3779b9; \
  _hj_k = keylen; \
  while (_hj_k >= 12) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
 \
     HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
 \
     _hj_key += 12; \
     _hj_k -= 12; \
  } \
  hashv += keylen; \
  switch ( _hj_k ) { \
     case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
     case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
     case 9:  hashv += ( (unsigned)_hj_key[8] << 8 ); \
     case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
     case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
     case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
     case 5:  _hj_j += _hj_key[4]; \
     case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
     case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
     case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
     case 1:  _hj_i += _hj_key[0]; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
  bkt = hashv & (num_bkts-1); \
} while(0)

/* The Paul Hsieh hash function */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                       +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_sfh_key=(char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = keylen; \
 \
  int _sfh_rem = _sfh_len & 3; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabe; \
 \
  /* Main loop */ \
  for (;_sfh_len > 0; _sfh_len--) { \
    hashv    += get16bits (_sfh_key); \
    _sfh_tmp  = (get16bits (_sfh_key+2) << 11) ^ hashv; \
    hashv     = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2*sizeof (uint16_t); \
    hashv    += hashv >> 11; \
  } \
 \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 16; \
            hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
            hashv += hashv >> 11; \
            break; \
    case 2: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 11; \
            hashv += hashv >> 17; \
            break; \
    case 1: hashv += *_sfh_key; \
            hashv ^= hashv << 10; \
            hashv += hashv >> 1; \
  } \
 \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
  bkt = hashv & (num_bkts-1); \
} while(0);

#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPUs' (e.g. x86) tolerance for unaligned reads.
 * For other types of CPU (e.g. Sparc) an unaligned read causes a bus error.
 * So MurmurHash comes in two versions, the faster unaligned one and the slower
 * aligned one. We only use the faster one on CPUs where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 *   gcc -m64 -dM -E - < /dev/null                  (on gcc)
 *   cc -## a.c (where a.c is a simple test file)   (Sun Studio)
 */
#if (defined(__i386__) || defined(__x86_64__))
#define HASH_MUR HASH_MUR_UNALIGNED
#else
#define HASH_MUR HASH_MUR_ALIGNED
#endif

/* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386 */
#define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
  const unsigned int _mur_m = 0x5bd1e995; \
  const int _mur_r = 24; \
  hashv = 0xcafebabe ^ keylen; \
  char *_mur_key = (char *)(key); \
  uint32_t _mur_tmp, _mur_len = keylen; \
 \
  for (;_mur_len >= 4; _mur_len-=4) { \
    _mur_tmp = *(uint32_t *)_mur_key; \
    _mur_tmp *= _mur_m; \
    _mur_tmp ^= _mur_tmp >> _mur_r; \
    _mur_tmp *= _mur_m; \
    hashv *= _mur_m; \
    hashv ^= _mur_tmp; \
    _mur_key += 4; \
  } \
 \
  switch(_mur_len) \
  { \
    case 3: hashv ^= _mur_key[2] << 16; \
    case 2: hashv ^= _mur_key[1] << 8; \
    case 1: hashv ^= _mur_key[0]; \
            hashv *= _mur_m; \
  }; \
 \
  hashv ^= hashv >> 13; \
  hashv *= _mur_m; \
  hashv ^= hashv >> 15; \
 \
  bkt = hashv & (num_bkts-1); \
} while(0)

/* Appleby's MurmurHash version for alignment-sensitive archs like Sparc */
#define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
  const unsigned int _mur_m = 0x5bd1e995; \
  const int _mur_r = 24; \
  hashv = 0xcafebabe ^ (keylen); \
  char *_mur_key = (char *)(key); \
  uint32_t _mur_len = keylen; \
  int _mur_align = (int)_mur_key & 3; \
 \
  if (_mur_align && (_mur_len >= 4)) { \
    unsigned _mur_t = 0, _mur_d = 0; \
    switch(_mur_align) { \
      case 1: _mur_t |= _mur_key[2] << 16; \
      case 2: _mur_t |= _mur_key[1] << 8; \
      case 3: _mur_t |= _mur_key[0]; \
    } \
    _mur_t <<= (8 * _mur_align); \
    _mur_key += 4-_mur_align; \
    _mur_len -= 4-_mur_align; \
    int _mur_sl = 8 * (4-_mur_align); \
    int _mur_sr = 8 * _mur_align; \
 \
    for (;_mur_len >= 4; _mur_len-=4) { \
      _mur_d = *(unsigned *)_mur_key; \
      _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
      unsigned _mur_k = _mur_t; \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_t = _mur_d; \
      _mur_key += 4; \
    } \
    _mur_d = 0; \
    if(_mur_len >= _mur_align) { \
      switch(_mur_align) { \
        case 3: _mur_d |= _mur_key[2] << 16; \
        case 2: _mur_d |= _mur_key[1] << 8; \
        case 1: _mur_d |= _mur_key[0]; \
      } \
      unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_k += _mur_align; \
      _mur_len -= _mur_align; \
 \
      switch(_mur_len) \
      { \
        case 3: hashv ^= _mur_key[2] << 16; \
        case 2: hashv ^= _mur_key[1] << 8; \
        case 1: hashv ^= _mur_key[0]; \
                hashv *= _mur_m; \
      } \
    } else { \
      switch(_mur_len) \
      { \
        case 3: _mur_d ^= _mur_key[2] << 16; \
        case 2: _mur_d ^= _mur_key[1] << 8; \
        case 1: _mur_d ^= _mur_key[0]; \
        case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
                hashv *= _mur_m; \
      } \
    } \
 \
    hashv ^= hashv >> 13; \
    hashv *= _mur_m; \
    hashv ^= hashv >> 15; \
  } else { \
    for (;_mur_len >= 4; _mur_len-=4) { \
      unsigned _mur_k = *(unsigned*)_mur_key; \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_key += 4; \
    } \
    switch(_mur_len) \
    { \
      case 3: hashv ^= _mur_key[2] << 16; \
      case 2: hashv ^= _mur_key[1] << 8; \
      case 1: hashv ^= _mur_key[0]; \
              hashv *= _mur_m; \
    } \
 \
    hashv ^= hashv >> 13; \
    hashv *= _mur_m; \
    hashv ^= hashv >> 15; \
  } \
  bkt = hashv & (num_bkts-1); \
} while(0)
#endif  /* HASH_USING_NO_STRICT_ALIASING */

/* key comparison function; return 0 if keys equal */
#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)

/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
 if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
 else out=NULL; \
 while (out) { \
    if (out->hh.keylen == keylen_in) { \
        if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
    } \
    if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \
    else out = NULL; \
 } \
} while(0)

/* add an item to a bucket */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
 head.count++; \
 (addhh)->hh_next = head.hh_head; \
 (addhh)->hh_prev = NULL; \
 if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \
 (head).hh_head=addhh; \
 if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
     && (addhh)->tbl->noexpand != 1) { \
       HASH_EXPAND_BUCKETS((addhh)->tbl); \
 } \
} while(0)

/* remove an item from a given bucket */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
    (head).count--; \
    if ((head).hh_head == hh_del) { \
      (head).hh_head = hh_del->hh_next; \
    } \
    if (hh_del->hh_prev) { \
        hh_del->hh_prev->hh_next = hh_del->hh_next; \
    } \
    if (hh_del->hh_next) { \
        hh_del->hh_next->hh_prev = hh_del->hh_prev; \
    }

/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items, b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *      ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *      ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
    unsigned _he_bkt; \
    unsigned _he_bkt_i; \
    struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
    UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
    _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
             2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
    memset(_he_new_buckets, 0, \
            2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    tbl->ideal_chain_maxlen = \
       (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
       ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
    tbl->nonideal_items = 0; \
    for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
    { \
        _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
        while (_he_thh) { \
           _he_hh_nxt = _he_thh->hh_next; \
           HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
           _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
           if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
             tbl->nonideal_items++; \
             _he_newbkt->expand_mult = _he_newbkt->count / \
                                        tbl->ideal_chain_maxlen; \
           } \
           _he_thh->hh_prev = NULL; \
           _he_thh->hh_next = _he_newbkt->hh_head; \
           if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
                _he_thh; \
           _he_newbkt->hh_head = _he_thh; \
           _he_thh = _he_hh_nxt; \
        } \
    } \
    uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
    tbl->num_buckets *= 2; \
    tbl->log2_num_buckets++; \
    tbl->buckets = _he_new_buckets; \
    tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
        (tbl->ineff_expands+1) : 0; \
    if (tbl->ineff_expands > 1) { \
        tbl->noexpand=1; \
        uthash_noexpand_fyi(tbl); \
    } \
    uthash_expand_fyi(tbl); \
} while(0)


/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head) { \
      _hs_insize = 1; \
      _hs_looping = 1; \
      _hs_list = &((head)->hh); \
      while (_hs_looping) { \
          _hs_p = _hs_list; \
          _hs_list = NULL; \
          _hs_tail = NULL; \
          _hs_nmerges = 0; \
          while (_hs_p) { \
              _hs_nmerges++; \
              _hs_q = _hs_p; \
              _hs_psize = 0; \
              for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
                  _hs_psize++; \
                  _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                          ((void*)((char*)(_hs_q->next) + \
                          (head)->hh.tbl->hho)) : NULL); \
                  if (! (_hs_q) ) break; \
              } \
              _hs_qsize = _hs_insize; \
              while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
                  if (_hs_psize == 0) { \
                      _hs_e = _hs_q; \
                      _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                              ((void*)((char*)(_hs_q->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_qsize--; \
                  } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
                      _hs_e = _hs_p; \
                      _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                              ((void*)((char*)(_hs_p->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_psize--; \
                  } else if (( \
                      cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                             DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
                             ) <= 0) { \
                      _hs_e = _hs_p; \
                      _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                              ((void*)((char*)(_hs_p->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_psize--; \
                  } else { \
                      _hs_e = _hs_q; \
                      _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                              ((void*)((char*)(_hs_q->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_qsize--; \
                  } \
                  if ( _hs_tail ) { \
                      _hs_tail->next = ((_hs_e) ? \
                            ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
                  } else { \
                      _hs_list = _hs_e; \
                  } \
                  _hs_e->prev = ((_hs_tail) ? \
                     ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
                  _hs_tail = _hs_e; \
              } \
              _hs_p = _hs_q; \
          } \
          _hs_tail->next = NULL; \
          if ( _hs_nmerges <= 1 ) { \
              _hs_looping=0; \
              (head)->hh.tbl->tail = _hs_tail; \
              DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
          } \
          _hs_insize *= 2; \
      } \
      HASH_FSCK(hh,head); \
  } \
} while (0)

/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt=NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if (src) { \
    for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
          _src_hh; \
          _src_hh = _src_hh->hh_next) { \
          _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
          if (cond(_elt)) { \
            _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
            _dst_hh->key = _src_hh->key; \
            _dst_hh->keylen = _src_hh->keylen; \
            _dst_hh->hashv = _src_hh->hashv; \
            _dst_hh->prev = _last_elt; \
            _dst_hh->next = NULL; \
            if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
            if (!dst) { \
              DECLTYPE_ASSIGN(dst,_elt); \
              HASH_MAKE_TABLE(hh_dst,dst); \
            } else { \
              _dst_hh->tbl = (dst)->hh_dst.tbl; \
            } \
            HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
            HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
            (dst)->hh_dst.tbl->num_items++; \
            _last_elt = _elt; \
            _last_elt_hh = _dst_hh; \
          } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst,dst); \
} while (0)

#define HASH_CLEAR(hh,head) \
do { \
  if (head) { \
    uthash_free((head)->hh.tbl->buckets, \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head)=NULL; \
  } \
} while(0)

#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp) \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
#else
#define HASH_ITER(hh,head,el,tmp) \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
#endif

/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)

typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;

/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1
#define HASH_BLOOM_SIGNATURE 0xb12220f2

typedef struct UT_hash_table {
   UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets;
   unsigned num_items;
   struct UT_hash_handle *tail;  /* tail hh in app order, for fast append */
   ptrdiff_t hho;                /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   char bloom_nbits;
#endif

} UT_hash_table;

typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order      */
   void *next;                       /* next element in app order      */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
   void *key;                        /* ptr to enclosing struct's key  */
   unsigned keylen;                  /* enclosing struct's key len     */
   unsigned hashv;                   /* result of hash-fcn(key)        */
} UT_hash_handle;

#endif /* UTHASH_H */