/*
Copyright (c) 2003-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef UTHASH_H
#define UTHASH_H

#define UTHASH_VERSION 2.3.0

#include <string.h>   /* memcmp, memset, strlen */
#include <stddef.h>   /* ptrdiff_t */
#include <stdlib.h>   /* exit */

#if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT
/* This codepath is provided for backward compatibility, but I plan to remove it. */
#warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead"
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#elif defined(HASH_NO_STDINT) && HASH_NO_STDINT
#else
#include <stdint.h>   /* uint8_t, uint32_t */
#endif

/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling c++ source) this code uses whatever method is needed
   or, for VS2008 where neither is available, uses casting workarounds. */
#if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
#if defined(_MSC_VER)   /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus)  /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else                   /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#endif
#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
#define NO_DECLTYPE
#else                   /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif
#endif

#ifdef NO_DECLTYPE
#define DECLTYPE(x)
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while (0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while (0)
#endif

#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz)      /* malloc fcn */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr)     /* free fcn */
#endif
#ifndef uthash_bzero
#define uthash_bzero(a,n) memset(a,'\0',n)
#endif
#ifndef uthash_strlen
#define uthash_strlen(s) strlen(s)
#endif

#ifndef HASH_FUNCTION
#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv)
#endif

#ifndef HASH_KEYCMP
#define HASH_KEYCMP(a,b,n) memcmp(a,b,n)
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl)            /* can be defined to log expands */
#endif

#ifndef HASH_NONFATAL_OOM
#define HASH_NONFATAL_OOM 0
#endif

#if HASH_NONFATAL_OOM
/* malloc failures can be recovered from */

#ifndef uthash_nonfatal_oom
#define uthash_nonfatal_oom(obj) do {} while (0)    /* non-fatal OOM error */
#endif

#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0)
#define IF_HASH_NONFATAL_OOM(x) x

#else
/* malloc failures result in lost memory, hash tables are unusable */

#ifndef uthash_fatal
#define uthash_fatal(msg) exit(-1)        /* fatal OOM error */
#endif

#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory")
#define IF_HASH_NONFATAL_OOM(x)

#endif

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32U      /* initial number of buckets        */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U  /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10U      /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
/* calculate the hash handle from element address elp */
#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho)))

#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_item = (itemptrhh); \
  unsigned _hd_bkt; \
  HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
  (head)->hh.tbl->buckets[_hd_bkt].count++; \
  _hd_hh_item->hh_next = NULL; \
  _hd_hh_item->hh_prev = NULL; \
} while (0)

#define HASH_VALUE(keyptr,keylen,hashv) \
do { \
  HASH_FUNCTION(keyptr, keylen, hashv); \
} while (0)
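/* Illustrative only: the hooks above are meant to be overridden by defining
 * them before this header is included. The sketch below (excluded from
 * compilation) swaps in hypothetical application allocators, selects HASH_FNV
 * as the hash function, and turns on the optional per-table bloom filter and
 * recoverable OOM handling; my_tracked_malloc/my_tracked_free are not part of
 * uthash.
 */
#if 0
#define uthash_malloc(sz)    my_tracked_malloc(sz)
#define uthash_free(ptr,sz)  my_tracked_free(ptr, sz)
#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_FNV(keyptr, keylen, hashv)
#define HASH_BLOOM 16          /* 2^16-bit bloom filter in front of each table */
#define HASH_NONFATAL_OOM 1    /* report OOM via uthash_nonfatal_oom() instead of exiting */
#include "uthash.h"
#endif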
#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
do { \
  (out) = NULL; \
  if (head) { \
    unsigned _hf_bkt; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
    if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
    } \
  } \
} while (0)

#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  (out) = NULL; \
  if (head) { \
    unsigned _hf_hashv; \
    HASH_VALUE(keyptr, keylen, _hf_hashv); \
    HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
  } \
} while (0)

#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
#define HASH_BLOOM_MAKE(tbl,oomed) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!(tbl)->bloom_bv) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
    (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
  } \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#else
#define HASH_BLOOM_MAKE(tbl,oomed)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0U
#endif

#define HASH_MAKE_TABLE(hh,head,oomed) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \
  if (!(head)->hh.tbl) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head)->hh.tbl->tail = &((head)->hh); \
    (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
    (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
    (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
    (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
        HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
    (head)->hh.tbl->signature = HASH_SIGNATURE; \
    if (!(head)->hh.tbl->buckets) { \
      HASH_RECORD_OOM(oomed); \
      uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    } else { \
      uthash_bzero((head)->hh.tbl->buckets, \
          HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
      HASH_BLOOM_MAKE((head)->hh.tbl, oomed); \
      IF_HASH_NONFATAL_OOM( \
        if (oomed) { \
          uthash_free((head)->hh.tbl->buckets, \
              HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
          uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
        } \
      ) \
    } \
  } \
} while (0)

#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
} while (0)
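/* Illustrative only: when one key drives several operations, HASH_VALUE can
 * compute the hash once and the *_BYHASHVALUE variants reuse it, so the key
 * bytes are not hashed repeatedly. item_t, items, key and new_item are
 * hypothetical application names, not part of uthash.
 */
#if 0
unsigned hashv;
item_t *found = NULL;
HASH_VALUE(&key, sizeof(key), hashv);
HASH_FIND_BYHASHVALUE(hh, items, &key, sizeof(key), hashv, found);
if (found == NULL) {
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, items, &new_item->key, sizeof(new_item->key), hashv, new_item);
}
#endif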
#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
} while (0)

#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
} while (0)

#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
} while (0)

#define HASH_APPEND_LIST(hh, head, add) \
do { \
  (add)->hh.next = NULL; \
  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
  (head)->hh.tbl->tail->next = (add); \
  (head)->hh.tbl->tail = &((add)->hh); \
} while (0)

#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  do { \
    if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \
      break; \
    } \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)

#ifdef NO_DECLTYPE
#undef HASH_AKBI_INNER_LOOP
#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  char *_hs_saved_head = (char*)(head); \
  do { \
    DECLTYPE_ASSIGN(head, _hs_iter); \
    if (cmpfcn(head, add) > 0) { \
      DECLTYPE_ASSIGN(head, _hs_saved_head); \
      break; \
    } \
    DECLTYPE_ASSIGN(head, _hs_saved_head); \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)
#endif

#if HASH_NONFATAL_OOM

#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
do { \
  if (!(oomed)) { \
    unsigned _ha_bkt; \
    (head)->hh.tbl->num_items++; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
    HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
    if (oomed) { \
      HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \
      HASH_DELETE_HH(hh, head, &(add)->hh); \
      (add)->hh.tbl = NULL; \
      uthash_nonfatal_oom(add); \
    } else { \
      HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
      HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
    } \
  } else { \
    (add)->hh.tbl = NULL; \
    uthash_nonfatal_oom(add); \
  } \
} while (0)

#else

#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
do { \
  unsigned _ha_bkt; \
  (head)->hh.tbl->num_items++; \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
} while (0)

#endif


#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
do { \
  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (char*) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh, add, _ha_oomed); \
    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
      (head) = (add); \
    IF_HASH_NONFATAL_OOM( } ) \
  } else { \
    void *_hs_iter = (head); \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
    if (_hs_iter) { \
      (add)->hh.next = _hs_iter; \
      if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
        HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
      } else { \
        (head) = (add); \
      } \
      HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
    } else { \
      HASH_APPEND_LIST(hh, head, add); \
    } \
  } \
  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \
} while (0)

#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
do { \
  unsigned _hs_hashv; \
  HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
} while (0)

#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)

#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)

#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
do { \
  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (const void*) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh, add, _ha_oomed); \
    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
      (head) = (add); \
    IF_HASH_NONFATAL_OOM( } ) \
  } else { \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_APPEND_LIST(hh, head, add); \
  } \
  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \
} while (0)

#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_hashv; \
  HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
} while (0)

#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)

#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)

#define HASH_TO_BKT(hashv,num_bkts,bkt) \
do { \
  bkt = ((hashv) & ((num_bkts) - 1U)); \
} while (0)

/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 *   HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
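/* Illustrative only: as noted above, the same symbol may be passed as both
 * head and deletee, e.g. HASH_DELETE(hh, users, users). Deletion only unlinks
 * the element; if it was heap-allocated, freeing it is still the caller's
 * job. users and user_t are hypothetical application names.
 */
#if 0
user_t *doomed = users;          /* element to remove (here: the current head) */
HASH_DELETE(hh, users, doomed);  /* unlinks doomed; users is repointed if needed */
free(doomed);                    /* uthash never frees the user structure itself */
#endif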
#define HASH_DELETE(hh,head,delptr) \
    HASH_DELETE_HH(hh, head, &(delptr)->hh)

#define HASH_DELETE_HH(hh,head,delptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_del = (delptrhh); \
  if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl->buckets, \
        (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } else { \
    unsigned _hd_bkt; \
    if (_hd_hh_del == (head)->hh.tbl->tail) { \
      (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \
    } \
    if (_hd_hh_del->prev != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \
    } else { \
      DECLTYPE_ASSIGN(head, _hd_hh_del->next); \
    } \
    if (_hd_hh_del->next != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \
    } \
    HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
    HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
    (head)->hh.tbl->num_items--; \
  } \
  HASH_FSCK(hh, head, "HASH_DELETE_HH"); \
} while (0)

/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
#define HASH_FIND_STR(head,findstr,out) \
do { \
  unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \
  HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \
} while (0)
#define HASH_ADD_STR(head,strfield,add) \
do { \
  unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \
  HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \
} while (0)
#define HASH_REPLACE_STR(head,strfield,add,replaced) \
do { \
  unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \
  HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \
} while (0)
#define HASH_FIND_INT(head,findint,out) \
    HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
    HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced) \
    HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out) \
    HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
    HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
    HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr) \
    HASH_DELETE(hh,head,delptr)
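/* Illustrative only: a minimal end-to-end use of the convenience forms above,
 * excluded from compilation. The hh-less macros assume the handle member is
 * named hh. user_t, users and the field names are hypothetical application
 * names, not part of uthash.
 */
#if 0
typedef struct user_t {
  int id;                /* key field used with HASH_ADD_INT / HASH_FIND_INT */
  char name[16];
  UT_hash_handle hh;     /* makes this structure hashable */
} user_t;

static void example(void)
{
  user_t *users = NULL;  /* head pointer: must start out NULL */
  user_t *u, *found;
  int wanted = 42;

  u = (user_t*)malloc(sizeof(*u));
  u->id = 42;
  HASH_ADD_INT(users, id, u);            /* "id" names the key field of *u */

  HASH_FIND_INT(users, &wanted, found);  /* found is NULL if no such key */
  if (found != NULL) {
    HASH_DEL(users, found);              /* unlink from the table */
    free(found);                         /* caller owns the memory */
  }
}
#endif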
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#include <stdio.h>   /* fprintf, stderr */
#define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head,where) \
do { \
  struct UT_hash_handle *_thh; \
  if (head) { \
    unsigned _bkt_i; \
    unsigned _count = 0; \
    char *_prev; \
    for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \
      unsigned _bkt_count = 0; \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
      _prev = NULL; \
      while (_thh) { \
        if (_prev != (char*)(_thh->hh_prev)) { \
          HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \
              (where), (void*)_thh->hh_prev, (void*)_prev); \
        } \
        _bkt_count++; \
        _prev = (char*)(_thh); \
        _thh = _thh->hh_next; \
      } \
      _count += _bkt_count; \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
        HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \
            (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
      } \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
    _count = 0; \
    _prev = NULL; \
    _thh = &(head)->hh; \
    while (_thh) { \
      _count++; \
      if (_prev != (char*)_thh->prev) { \
        HASH_OOPS("%s: invalid prev %p, actual %p\n", \
            (where), (void*)_thh->prev, (void*)_prev); \
      } \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
      _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid app item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
  } \
} while (0)
#else
#define HASH_FSCK(hh,head,where)
#endif

/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
  unsigned _klen = fieldlen; \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif

/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. */
#define HASH_BER(key,keylen,hashv) \
do { \
  unsigned _hb_keylen = (unsigned)keylen; \
  const unsigned char *_hb_key = (const unsigned char*)(key); \
  (hashv) = 0; \
  while (_hb_keylen-- != 0U) { \
    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \
  } \
} while (0)


/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
 * (archive link: https://archive.is/Ivcan )
 */
#define HASH_SAX(key,keylen,hashv) \
do { \
  unsigned _sx_i; \
  const unsigned char *_hs_key = (const unsigned char*)(key); \
  hashv = 0; \
  for (_sx_i=0; _sx_i < keylen; _sx_i++) { \
    hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
  } \
} while (0)
/* FNV-1a variation */
#define HASH_FNV(key,keylen,hashv) \
do { \
  unsigned _fn_i; \
  const unsigned char *_hf_key = (const unsigned char*)(key); \
  (hashv) = 2166136261U; \
  for (_fn_i=0; _fn_i < keylen; _fn_i++) { \
    hashv = hashv ^ _hf_key[_fn_i]; \
    hashv = hashv * 16777619U; \
  } \
} while (0)

#define HASH_OAT(key,keylen,hashv) \
do { \
  unsigned _ho_i; \
  const unsigned char *_ho_key=(const unsigned char*)(key); \
  hashv = 0; \
  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
    hashv += _ho_key[_ho_i]; \
    hashv += (hashv << 10); \
    hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
} while (0)

#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)

#define HASH_JEN(key,keylen,hashv) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  unsigned const char *_hj_key=(unsigned const char*)(key); \
  hashv = 0xfeedbeefu; \
  _hj_i = _hj_j = 0x9e3779b9u; \
  _hj_k = (unsigned)(keylen); \
  while (_hj_k >= 12U) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
    \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
    \
    _hj_key += 12; \
    _hj_k -= 12U; \
  } \
  hashv += (unsigned)(keylen); \
  switch ( _hj_k ) { \
    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
    case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
    case 9:  hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
    case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
    case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
    case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
    case 5:  _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
    case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
    case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
    case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
    case 1:  _hj_i += _hj_key[0]; /* FALLTHROUGH */ \
    default: ; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
} while (0)

/* The Paul Hsieh hash function */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
    +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
#define HASH_SFH(key,keylen,hashv) \
do { \
  unsigned const char *_sfh_key=(unsigned const char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
  \
  unsigned _sfh_rem = _sfh_len & 3U; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabeu; \
  \
  /* Main loop */ \
  for (;_sfh_len > 0U; _sfh_len--) { \
    hashv += get16bits (_sfh_key); \
    _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
    hashv = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2U*sizeof (uint16_t); \
    hashv += hashv >> 11; \
  } \
  \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 16; \
            hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
            hashv += hashv >> 11; \
            break; \
    case 2: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 11; \
            hashv += hashv >> 17; \
            break; \
    case 1: hashv += *_sfh_key; \
            hashv ^= hashv << 10; \
            hashv += hashv >> 1; \
            break; \
    default: ; \
  } \
  \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
} while (0)

/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
do { \
  if ((head).hh_head != NULL) { \
    DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
  } else { \
    (out) = NULL; \
  } \
  while ((out) != NULL) { \
    if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
      if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) { \
        break; \
      } \
    } \
    if ((out)->hh.hh_next != NULL) { \
      DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
    } else { \
      (out) = NULL; \
    } \
  } \
} while (0)

/* add an item to a bucket */
#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \
do { \
  UT_hash_bucket *_ha_head = &(head); \
  _ha_head->count++; \
  (addhh)->hh_next = _ha_head->hh_head; \
  (addhh)->hh_prev = NULL; \
  if (_ha_head->hh_head != NULL) { \
    _ha_head->hh_head->hh_prev = (addhh); \
  } \
  _ha_head->hh_head = (addhh); \
  if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
      && !(addhh)->tbl->noexpand) { \
    HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \
    IF_HASH_NONFATAL_OOM( \
      if (oomed) { \
        HASH_DEL_IN_BKT(head,addhh); \
      } \
    ) \
  } \
} while (0)

/* remove an item from a given bucket */
#define HASH_DEL_IN_BKT(head,delhh) \
do { \
  UT_hash_bucket *_hd_head = &(head); \
  _hd_head->count--; \
  if (_hd_head->hh_head == (delhh)) { \
    _hd_head->hh_head = (delhh)->hh_next; \
  } \
  if ((delhh)->hh_prev) { \
    (delhh)->hh_prev->hh_next = (delhh)->hh_next; \
  } \
  if ((delhh)->hh_next) { \
    (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \
  } \
} while (0)
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *      ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *      ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
#define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \
do { \
  unsigned _he_bkt; \
  unsigned _he_bkt_i; \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
      sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \
  if (!_he_new_buckets) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero(_he_new_buckets, \
        sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \
    (tbl)->ideal_chain_maxlen = \
        ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \
        ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
    (tbl)->nonideal_items = 0; \
    for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \
      _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \
      while (_he_thh != NULL) { \
        _he_hh_nxt = _he_thh->hh_next; \
        HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \
        _he_newbkt = &(_he_new_buckets[_he_bkt]); \
        if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \
          (tbl)->nonideal_items++; \
          if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \
            _he_newbkt->expand_mult++; \
          } \
        } \
        _he_thh->hh_prev = NULL; \
        _he_thh->hh_next = _he_newbkt->hh_head; \
        if (_he_newbkt->hh_head != NULL) { \
          _he_newbkt->hh_head->hh_prev = _he_thh; \
        } \
        _he_newbkt->hh_head = _he_thh; \
        _he_thh = _he_hh_nxt; \
      } \
    } \
    uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
    (tbl)->num_buckets *= 2U; \
    (tbl)->log2_num_buckets++; \
    (tbl)->buckets = _he_new_buckets; \
    (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? \
        ((tbl)->ineff_expands+1U) : 0U; \
    if ((tbl)->ineff_expands > 1U) { \
      (tbl)->noexpand = 1; \
      uthash_noexpand_fyi(tbl); \
    } \
    uthash_expand_fyi(tbl); \
  } \
} while (0)


/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head != NULL) { \
    _hs_insize = 1; \
    _hs_looping = 1; \
    _hs_list = &((head)->hh); \
    while (_hs_looping != 0U) { \
      _hs_p = _hs_list; \
      _hs_list = NULL; \
      _hs_tail = NULL; \
      _hs_nmerges = 0; \
      while (_hs_p != NULL) { \
        _hs_nmerges++; \
        _hs_q = _hs_p; \
        _hs_psize = 0; \
        for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \
          _hs_psize++; \
          _hs_q = ((_hs_q->next != NULL) ? \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
          if (_hs_q == NULL) { \
            break; \
          } \
        } \
        _hs_qsize = _hs_insize; \
        while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \
          if (_hs_psize == 0U) { \
            _hs_e = _hs_q; \
            _hs_q = ((_hs_q->next != NULL) ? \
                HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
            _hs_qsize--; \
          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \
            _hs_e = _hs_p; \
            if (_hs_p != NULL) { \
              _hs_p = ((_hs_p->next != NULL) ? \
                  HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \
            } \
            _hs_psize--; \
          } else if ((cmpfcn( \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \
                )) <= 0) { \
            _hs_e = _hs_p; \
            if (_hs_p != NULL) { \
              _hs_p = ((_hs_p->next != NULL) ? \
                  HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \
            } \
            _hs_psize--; \
          } else { \
            _hs_e = _hs_q; \
            _hs_q = ((_hs_q->next != NULL) ? \
                HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
            _hs_qsize--; \
          } \
          if ( _hs_tail != NULL ) { \
            _hs_tail->next = ((_hs_e != NULL) ? \
                ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \
          } else { \
            _hs_list = _hs_e; \
          } \
          if (_hs_e != NULL) { \
            _hs_e->prev = ((_hs_tail != NULL) ? \
                ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \
          } \
          _hs_tail = _hs_e; \
        } \
        _hs_p = _hs_q; \
      } \
      if (_hs_tail != NULL) { \
        _hs_tail->next = NULL; \
      } \
      if (_hs_nmerges <= 1U) { \
        _hs_looping = 0; \
        (head)->hh.tbl->tail = _hs_tail; \
        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
      } \
      _hs_insize *= 2U; \
    } \
    HASH_FSCK(hh, head, "HASH_SRT"); \
  } \
} while (0)
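/* Illustrative only: HASH_SORT (or HASH_SRT for a non-"hh" handle) reorders
 * the application-order list using a strcmp-style comparison over element
 * pointers. user_t, users and by_id are hypothetical application names.
 */
#if 0
static int by_id(user_t *a, user_t *b)
{
  return (a->id > b->id) - (a->id < b->id);   /* negative, zero or positive */
}

/* later, in application code: */
HASH_SORT(users, by_id);   /* same as HASH_SRT(hh, users, by_id) */
#endif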
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * hash handle that must be present in the structure.
 */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt = NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if ((src) != NULL) { \
    for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
           _src_hh != NULL; \
           _src_hh = _src_hh->hh_next) { \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
        if (cond(_elt)) { \
          IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \
          _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho); \
          _dst_hh->key = _src_hh->key; \
          _dst_hh->keylen = _src_hh->keylen; \
          _dst_hh->hashv = _src_hh->hashv; \
          _dst_hh->prev = _last_elt; \
          _dst_hh->next = NULL; \
          if (_last_elt_hh != NULL) { \
            _last_elt_hh->next = _elt; \
          } \
          if ((dst) == NULL) { \
            DECLTYPE_ASSIGN(dst, _elt); \
            HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \
            IF_HASH_NONFATAL_OOM( \
              if (_hs_oomed) { \
                uthash_nonfatal_oom(_elt); \
                (dst) = NULL; \
                continue; \
              } \
            ) \
          } else { \
            _dst_hh->tbl = (dst)->hh_dst.tbl; \
          } \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \
          (dst)->hh_dst.tbl->num_items++; \
          IF_HASH_NONFATAL_OOM( \
            if (_hs_oomed) { \
              HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \
              HASH_DELETE_HH(hh_dst, dst, _dst_hh); \
              _dst_hh->tbl = NULL; \
              uthash_nonfatal_oom(_elt); \
              continue; \
            } \
          ) \
          HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \
          _last_elt = _elt; \
          _last_elt_hh = _dst_hh; \
        } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \
} while (0)

#define HASH_CLEAR(hh,head) \
do { \
  if ((head) != NULL) { \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl->buckets, \
        (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } \
} while (0)

#define HASH_OVERHEAD(hh,head) \
  (((head) != NULL) ? ( \
  (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \
           ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \
           sizeof(UT_hash_table) + \
           (HASH_BLOOM_BYTELEN))) : 0U)

#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp) \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#else
#define HASH_ITER(hh,head,el,tmp) \
for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#endif

/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
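/* Illustrative only: HASH_ITER visits elements in application order and is
 * safe against deleting the current element, because tmp already holds the
 * next one before the loop body runs. user_t and users are hypothetical.
 */
#if 0
user_t *el, *tmp;
unsigned n = HASH_COUNT(users);   /* number of items; 0 for a NULL head */

HASH_ITER(hh, users, el, tmp) {
  HASH_DEL(users, el);            /* deleting el inside the loop is safe */
  free(el);
}
#endif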
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;

/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1u
#define HASH_BLOOM_SIGNATURE 0xb12220f2u

typedef struct UT_hash_table {
   UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets;
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append    */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   uint8_t bloom_nbits;
#endif

} UT_hash_table;

typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order      */
   void *next;                       /* next element in app order      */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
   const void *key;                  /* ptr to enclosing struct's key  */
   unsigned keylen;                  /* enclosing struct's key len     */
   unsigned hashv;                   /* result of hash-fcn(key)        */
} UT_hash_handle;

#endif /* UTHASH_H */