w32tex
About: TeX Live provides a comprehensive TeX system including all the major TeX-related programs, macro packages, and fonts that are free software. Windows sources.
  Fossies Dox: w32tex-src.tar.xz ("unofficial" and experimental doxygen-generated source code documentation)

uthash.h
Go to the documentation of this file.
1 /*
2 Copyright (c) 2003-2017, Troy D. Hanson http://troydhanson.github.com/uthash/
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 
8  * Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 
11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
27 #define UTHASH_VERSION 2.0.2
28 
29 #include <string.h> /* memcmp, memset, strlen */
30 #include <stddef.h> /* ptrdiff_t */
31 #include <stdlib.h> /* exit */
32 
33 /* These macros use decltype or the earlier __typeof GNU extension.
34  As decltype is only available in newer compilers (VS2010 or gcc 4.3+
35  when compiling c++ source) this code uses whatever method is needed
36  or, for VS2008 where neither is available, uses casting workarounds. */
37 #if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
38 #if defined(_MSC_VER) /* MS compiler */
39 #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
40 #define DECLTYPE(x) (decltype(x))
41 #else /* VS2008 or older (or VS2010 in C mode) */
42 #define NO_DECLTYPE
43 #endif
44 #elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
45 #define NO_DECLTYPE
46 #else /* GNU, Sun and other compilers */
47 #define DECLTYPE(x) (__typeof(x))
48 #endif
49 #endif
50 
51 #ifdef NO_DECLTYPE
/* Without decltype/__typeof, DECLTYPE(x) expands to nothing and assignment
 * is done by smuggling the pointer value through a char** — this only works
 * because every use site assigns one object pointer to another. */
52 #define DECLTYPE(x)
53 #define DECLTYPE_ASSIGN(dst,src) \
54 do { \
55  char **_da_dst = (char**)(&(dst)); \
56  *_da_dst = (char*)(src); \
57 } while (0)
58 #else
/* With decltype/__typeof available, assign via an explicit cast to dst's type. */
59 #define DECLTYPE_ASSIGN(dst,src) \
60 do { \
61  (dst) = DECLTYPE(dst)(src); \
62 } while (0)
63 #endif
64 
65 /* a number of the hash functions use uint32_t, which isn't defined on pre-VS2010;
 * fall back to hand-rolled typedefs where <stdint.h> is known to be missing.
 * NOTE(review): the fallback assumes 'unsigned int' is 32 bits — true on the
 * targeted platforms, but worth confirming on exotic ones. */
66 #if defined(_WIN32)
67 #if defined(_MSC_VER) && _MSC_VER >= 1600
68 #include <stdint.h>
69 #elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
70 #include <stdint.h>
71 #else
72 typedef unsigned int uint32_t;
73 typedef unsigned char uint8_t;
74 #endif
75 #elif defined(__GNUC__) && !defined(__VXWORKS__)
76 #include <stdint.h>
77 #else
78 typedef unsigned int uint32_t;
79 typedef unsigned char uint8_t;
80 #endif
81 
/* Customization hooks: an application may #define any of these before
 * including uthash.h to replace the allocator, comparison, or logging
 * behavior. Each hook is only defined here if the app did not supply it. */
82 #ifndef uthash_malloc
83 #define uthash_malloc(sz) malloc(sz) /* malloc fcn */
84 #endif
85 #ifndef uthash_free
86 #define uthash_free(ptr,sz) free(ptr) /* free fcn */
87 #endif
88 #ifndef uthash_bzero
89 #define uthash_bzero(a,n) memset(a,'\0',n)
90 #endif
91 #ifndef uthash_memcmp
92 #define uthash_memcmp(a,b,n) memcmp(a,b,n)
93 #endif
94 #ifndef uthash_strlen
95 #define uthash_strlen(s) strlen(s)
96 #endif
97 
98 #ifndef uthash_noexpand_fyi
99 #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
100 #endif
101 #ifndef uthash_expand_fyi
102 #define uthash_expand_fyi(tbl) /* can be defined to log expands */
103 #endif
104 
/* Out-of-memory policy: with HASH_NONFATAL_OOM=1, allocation failure sets a
 * flag and invokes uthash_nonfatal_oom(elt) so the caller can recover;
 * otherwise (the default) uthash_fatal() is called, which exits. */
105 #ifndef HASH_NONFATAL_OOM
106 #define HASH_NONFATAL_OOM 0
107 #endif
108 
109 #if HASH_NONFATAL_OOM
110 /* malloc failures can be recovered from */
111 
112 #ifndef uthash_nonfatal_oom
113 #define uthash_nonfatal_oom(obj) do {} while (0) /* non-fatal OOM error */
114 #endif
115 
116 #define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0)
117 #define IF_HASH_NONFATAL_OOM(x) x
118 
119 #else
120 /* malloc failures result in lost memory, hash tables are unusable */
121 
122 #ifndef uthash_fatal
123 #define uthash_fatal(msg) exit(-1) /* fatal OOM error */
124 #endif
125 
126 #define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory")
127 #define IF_HASH_NONFATAL_OOM(x)
128 
129 #endif
130 
131 /* initial number of buckets */
132 #define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */
133 #define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
134 #define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */
135 
136 /* calculate the element whose hash handle address is hhp */
137 #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
138 /* calculate the hash handle from element address elp */
139 #define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
140 
/* Undo a bucket insertion after a non-fatal OOM: restore the bucket count
 * and clear the item's intra-bucket links (used by HASH_ADD_TO_TABLE). */
141 #define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \
142 do { \
143  struct UT_hash_handle *_hd_hh_item = (itemptrhh); \
144  unsigned _hd_bkt; \
145  HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
146  (head)->hh.tbl->buckets[_hd_bkt].count++; \
147  _hd_hh_item->hh_next = NULL; \
148  _hd_hh_item->hh_prev = NULL; \
149 } while (0)
150 
/* Compute the hash of (keyptr,keylen) into hashv using the selected HASH_FCN. */
151 #define HASH_VALUE(keyptr,keylen,hashv) \
152 do { \
153  HASH_FCN(keyptr, keylen, hashv); \
154 } while (0)
155 
/* Look up a key whose hash value is already known; sets out to the found
 * element or NULL. The Bloom filter (if enabled) is consulted first to
 * skip the bucket scan for definitely-absent keys. */
156 #define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
157 do { \
158  (out) = NULL; \
159  if (head) { \
160  unsigned _hf_bkt; \
161  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
162  if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
163  HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
164  } \
165  } \
166 } while (0)
167 
/* Hash the key, then look it up: the general-purpose find. */
168 #define HASH_FIND(hh,head,keyptr,keylen,out) \
169 do { \
170  unsigned _hf_hashv; \
171  HASH_VALUE(keyptr, keylen, _hf_hashv); \
172  HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
173 } while (0)
174 
/* Optional Bloom filter: define HASH_BLOOM to n to allocate a 2^n-bit filter
 * per table. Lookups of absent keys can then often skip the bucket walk.
 * When HASH_BLOOM is not defined, the filter macros compile to no-ops and
 * HASH_BLOOM_TEST always reports "maybe present" (1). */
175 #ifdef HASH_BLOOM
176 #define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
177 #define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
178 #define HASH_BLOOM_MAKE(tbl,oomed) \
179 do { \
180  (tbl)->bloom_nbits = HASH_BLOOM; \
181  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
182  if (!(tbl)->bloom_bv) { \
183  HASH_RECORD_OOM(oomed); \
184  } else { \
185  uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
186  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
187  } \
188 } while (0)
189 
190 #define HASH_BLOOM_FREE(tbl) \
191 do { \
192  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
193 } while (0)
194 
195 #define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
196 #define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))
197 
/* Add/test use the low bloom_nbits bits of the hash as the filter index. */
198 #define HASH_BLOOM_ADD(tbl,hashv) \
199  HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
200 
201 #define HASH_BLOOM_TEST(tbl,hashv) \
202  HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
203 
204 #else
205 #define HASH_BLOOM_MAKE(tbl,oomed)
206 #define HASH_BLOOM_FREE(tbl)
207 #define HASH_BLOOM_ADD(tbl,hashv)
208 #define HASH_BLOOM_TEST(tbl,hashv) (1)
209 #define HASH_BLOOM_BYTELEN 0U
210 #endif
211 
/* Allocate and initialize the UT_hash_table for a new hash whose first
 * element is (head). On allocation failure everything already allocated is
 * freed and oomed is recorded (only meaningful when HASH_NONFATAL_OOM). */
212 #define HASH_MAKE_TABLE(hh,head,oomed) \
213 do { \
214  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \
215  if (!(head)->hh.tbl) { \
216  HASH_RECORD_OOM(oomed); \
217  } else { \
218  uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \
219  (head)->hh.tbl->tail = &((head)->hh); \
220  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
221  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
222  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
223  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
224  HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
225  (head)->hh.tbl->signature = HASH_SIGNATURE; \
226  if (!(head)->hh.tbl->buckets) { \
227  HASH_RECORD_OOM(oomed); \
228  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
229  } else { \
230  uthash_bzero((head)->hh.tbl->buckets, \
231  HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
232  HASH_BLOOM_MAKE((head)->hh.tbl, oomed); \
233  IF_HASH_NONFATAL_OOM( \
234  if (oomed) { \
235  uthash_free((head)->hh.tbl->buckets, \
236  HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
237  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
238  } \
239  ) \
240  } \
241  } \
242 } while (0)
243 
/* Replace semantics: find an existing element with the same key, delete it
 * (reporting it through 'replaced', or NULL if none), then add 'add'.
 * The _INORDER variants keep the app-order list sorted per cmpfcn. */
244 #define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
245 do { \
246  (replaced) = NULL; \
247  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
248  if (replaced) { \
249  HASH_DELETE(hh, head, replaced); \
250  } \
251  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
252 } while (0)
253 
254 #define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
255 do { \
256  (replaced) = NULL; \
257  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
258  if (replaced) { \
259  HASH_DELETE(hh, head, replaced); \
260  } \
261  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
262 } while (0)
263 
/* Hash-computing wrappers around the BYHASHVALUE replace forms. */
264 #define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
265 do { \
266  unsigned _hr_hashv; \
267  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
268  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
269 } while (0)
270 
271 #define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
272 do { \
273  unsigned _hr_hashv; \
274  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
275  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
276 } while (0)
277 
/* Append 'add' to the end of the app-order doubly-linked list (head known
 * non-NULL; tail pointer maintained in the table). */
278 #define HASH_APPEND_LIST(hh, head, add) \
279 do { \
280  (add)->hh.next = NULL; \
281  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
282  (head)->hh.tbl->tail->next = (add); \
283  (head)->hh.tbl->tail = &((add)->hh); \
284 } while (0)
285 
/* Advance _hs_iter (declared by the caller) through the app-order list until
 * cmpfcn says 'add' sorts before the current element, or the list ends.
 * Used only by HASH_ADD_KEYPTR_BYHASHVALUE_INORDER. */
286 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
287 do { \
288  do { \
289  if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \
290  break; \
291  } \
292  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
293 } while (0)
294 
295 #ifdef NO_DECLTYPE
/* Without decltype, temporarily repoint 'head' at the iterator so cmpfcn
 * receives a correctly-typed pointer, restoring it around each call. */
296 #undef HASH_AKBI_INNER_LOOP
297 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
298 do { \
299  char *_hs_saved_head = (char*)(head); \
300  do { \
301  DECLTYPE_ASSIGN(head, _hs_iter); \
302  if (cmpfcn(head, add) > 0) { \
303  DECLTYPE_ASSIGN(head, _hs_saved_head); \
304  break; \
305  } \
306  DECLTYPE_ASSIGN(head, _hs_saved_head); \
307  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
308 } while (0)
309 #endif
310 
/* Insert an already-linked element into the bucket array, Bloom filter and
 * key-emission stream. The non-fatal-OOM variant rolls the insertion back
 * (bucket, list) and reports through uthash_nonfatal_oom on failure. */
311 #if HASH_NONFATAL_OOM
312 
313 #define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
314 do { \
315  if (!(oomed)) { \
316  unsigned _ha_bkt; \
317  (head)->hh.tbl->num_items++; \
318  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
319  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
320  if (oomed) { \
321  HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \
322  HASH_DELETE_HH(hh, head, &(add)->hh); \
323  (add)->hh.tbl = NULL; \
324  uthash_nonfatal_oom(add); \
325  } else { \
326  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
327  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
328  } \
329  } else { \
330  (add)->hh.tbl = NULL; \
331  uthash_nonfatal_oom(add); \
332  } \
333 } while (0)
334 
335 #else
336 
/* Fatal-OOM build: no rollback path needed, failure has already exited. */
337 #define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
338 do { \
339  unsigned _ha_bkt; \
340  (head)->hh.tbl->num_items++; \
341  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
342  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
343  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
344  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
345 } while (0)
346 
347 #endif
348 
349 
/* Sorted insert with precomputed hash: fill in the handle, create the table
 * on first insert, splice 'add' into the app-order list at the position
 * chosen by cmpfcn, then register it in the bucket array. */
350 #define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
351 do { \
352  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
353  (add)->hh.hashv = (hashval); \
354  (add)->hh.key = (char*) (keyptr); \
355  (add)->hh.keylen = (unsigned) (keylen_in); \
356  if (!(head)) { \
357  (add)->hh.next = NULL; \
358  (add)->hh.prev = NULL; \
359  HASH_MAKE_TABLE(hh, add, _ha_oomed); \
360  IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
361  (head) = (add); \
362  IF_HASH_NONFATAL_OOM( } ) \
363  } else { \
364  void *_hs_iter = (head); \
365  (add)->hh.tbl = (head)->hh.tbl; \
366  HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
367  if (_hs_iter) { \
368  (add)->hh.next = _hs_iter; \
369  if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
370  HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
371  } else { \
372  (head) = (add); \
373  } \
374  HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
375  } else { \
376  HASH_APPEND_LIST(hh, head, add); \
377  } \
378  } \
379  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
380  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \
381 } while (0)
382 
/* Sorted insert: hash the key first, then delegate. */
383 #define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
384 do { \
385  unsigned _hs_hashv; \
386  HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
387  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
388 } while (0)
389 
/* Field-based conveniences: the key is a member of the element itself. */
390 #define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
391  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)
392 
393 #define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
394  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
395 
/* Unsorted insert with precomputed hash: new elements append to the tail of
 * the app-order list. */
396 #define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
397 do { \
398  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
399  (add)->hh.hashv = (hashval); \
400  (add)->hh.key = (char*) (keyptr); \
401  (add)->hh.keylen = (unsigned) (keylen_in); \
402  if (!(head)) { \
403  (add)->hh.next = NULL; \
404  (add)->hh.prev = NULL; \
405  HASH_MAKE_TABLE(hh, add, _ha_oomed); \
406  IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
407  (head) = (add); \
408  IF_HASH_NONFATAL_OOM( } ) \
409  } else { \
410  (add)->hh.tbl = (head)->hh.tbl; \
411  HASH_APPEND_LIST(hh, head, add); \
412  } \
413  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
414  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \
415 } while (0)
416 
/* The general-purpose add: hash the key, then delegate. */
417 #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
418 do { \
419  unsigned _ha_hashv; \
420  HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
421  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
422 } while (0)
423 
424 #define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
425  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)
426 
427 #define HASH_ADD(hh,head,fieldname,keylen_in,add) \
428  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
429 
/* Map a hash value to a bucket index; valid because num_bkts is always a
 * power of two, so masking is equivalent to modulo. */
430 #define HASH_TO_BKT(hashv,num_bkts,bkt) \
431 do { \
432  bkt = ((hashv) & ((num_bkts) - 1U)); \
433 } while (0)
434 
435 /* delete "delptr" from the hash table.
436  * "the usual" patch-up process for the app-order doubly-linked-list.
437  * The use of _hd_hh_del below deserves special explanation.
438  * These used to be expressed using (delptr) but that led to a bug
439  * if someone used the same symbol for the head and deletee, like
440  * HASH_DELETE(hh,users,users);
441  * We want that to work, but by changing the head (users) below
442  * we were forfeiting our ability to further refer to the deletee (users)
443  * in the patch-up process. Solution: use scratch space to
444  * copy the deletee pointer, then the latter references are via that
445  * scratch pointer rather than through the repointed (users) symbol.
446  */
447 #define HASH_DELETE(hh,head,delptr) \
448  HASH_DELETE_HH(hh, head, &(delptr)->hh)
449 
/* Delete by hash-handle pointer. If this was the only element, the whole
 * table (buckets, Bloom filter) is freed and head becomes NULL; otherwise
 * the app-order list, tail pointer and owning bucket are patched up. */
450 #define HASH_DELETE_HH(hh,head,delptrhh) \
451 do { \
452  struct UT_hash_handle *_hd_hh_del = (delptrhh); \
453  if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
454  HASH_BLOOM_FREE((head)->hh.tbl); \
455  uthash_free((head)->hh.tbl->buckets, \
456  (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
457  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
458  (head) = NULL; \
459  } else { \
460  unsigned _hd_bkt; \
461  if (_hd_hh_del == (head)->hh.tbl->tail) { \
462  (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \
463  } \
464  if (_hd_hh_del->prev != NULL) { \
465  HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \
466  } else { \
467  DECLTYPE_ASSIGN(head, _hd_hh_del->next); \
468  } \
469  if (_hd_hh_del->next != NULL) { \
470  HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \
471  } \
472  HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
473  HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
474  (head)->hh.tbl->num_items--; \
475  } \
476  HASH_FSCK(hh, head, "HASH_DELETE_HH"); \
477 } while (0)
478 
479 /* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for the common key types:
 * NUL-terminated string fields, int fields and pointer fields, all using the
 * conventional handle name 'hh'. The _STR forms take the key length from the
 * element being added/looked up via uthash_strlen. */
480 #define HASH_FIND_STR(head,findstr,out) \
481  HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
482 #define HASH_ADD_STR(head,strfield,add) \
483  HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
484 #define HASH_REPLACE_STR(head,strfield,add,replaced) \
485  HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
486 #define HASH_FIND_INT(head,findint,out) \
487  HASH_FIND(hh,head,findint,sizeof(int),out)
488 #define HASH_ADD_INT(head,intfield,add) \
489  HASH_ADD(hh,head,intfield,sizeof(int),add)
490 #define HASH_REPLACE_INT(head,intfield,add,replaced) \
491  HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
492 #define HASH_FIND_PTR(head,findptr,out) \
493  HASH_FIND(hh,head,findptr,sizeof(void *),out)
494 #define HASH_ADD_PTR(head,ptrfield,add) \
495  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
496 #define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
497  HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
498 #define HASH_DEL(head,delptr) \
499  HASH_DELETE(hh,head,delptr)
500 
501 /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
502  * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 * It verifies three invariants: each bucket chain's hh_prev links and count,
 * the total item count across buckets, and the app-order list's prev links
 * and length. Any violation prints a diagnostic and exits.
503  */
504 #ifdef HASH_DEBUG
505 #define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
506 #define HASH_FSCK(hh,head,where) \
507 do { \
508  struct UT_hash_handle *_thh; \
509  if (head) { \
510  unsigned _bkt_i; \
511  unsigned _count = 0; \
512  char *_prev; \
513  for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \
514  unsigned _bkt_count = 0; \
515  _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
516  _prev = NULL; \
517  while (_thh) { \
518  if (_prev != (char*)(_thh->hh_prev)) { \
519  HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \
520  (where), (void*)_thh->hh_prev, (void*)_prev); \
521  } \
522  _bkt_count++; \
523  _prev = (char*)(_thh); \
524  _thh = _thh->hh_next; \
525  } \
526  _count += _bkt_count; \
527  if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
528  HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \
529  (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
530  } \
531  } \
532  if (_count != (head)->hh.tbl->num_items) { \
533  HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \
534  (where), (head)->hh.tbl->num_items, _count); \
535  } \
536  _count = 0; \
537  _prev = NULL; \
538  _thh = &(head)->hh; \
539  while (_thh) { \
540  _count++; \
541  if (_prev != (char*)_thh->prev) { \
542  HASH_OOPS("%s: invalid prev %p, actual %p\n", \
543  (where), (void*)_thh->prev, (void*)_prev); \
544  } \
545  _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
546  _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \
547  } \
548  if (_count != (head)->hh.tbl->num_items) { \
549  HASH_OOPS("%s: invalid app item count %u, actual %u\n", \
550  (where), (head)->hh.tbl->num_items, _count); \
551  } \
552  } \
553 } while (0)
554 #else
555 #define HASH_FSCK(hh,head,where)
556 #endif
557 
558 /* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
559  * the descriptor to which this macro is defined for tuning the hash function.
560  * The app can #include <unistd.h> to get the prototype for write(2). */
561 #ifdef HASH_EMIT_KEYS
562 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
563 do { \
564  unsigned _klen = fieldlen; \
565  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
566  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
567 } while (0)
568 #else
569 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
570 #endif
571 
572 /* HASH_FCN selects the hash function. Default to the Jenkins hash unless
 * overridden at compile time, e.g. -DHASH_FUNCTION=HASH_SAX. */
573 #ifdef HASH_FUNCTION
574 #define HASH_FCN HASH_FUNCTION
575 #else
576 #define HASH_FCN HASH_JEN
577 #endif
578 
/* The Bernstein hash function, used in Perl prior to v5.6.
 * Note ((x<<5)+x) == x*33 (the original comment omitted the parentheses,
 * which matters: << binds looser than +).
 * key    : pointer to the key bytes
 * keylen : key length in bytes
 * hashv  : unsigned lvalue receiving the hash */
#define HASH_BER(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _hb_keylen = (unsigned)(keylen);  /* parenthesized: the cast must    \
                                                apply to the whole argument */  \
  const unsigned char *_hb_key = (const unsigned char*)(key);                    \
  (hashv) = 0;                                                                   \
  while (_hb_keylen-- != 0U) {                                                   \
    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++;                           \
  }                                                                              \
} while (0)
589 
590 
591 /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
592  * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
593 #define HASH_SAX(key,keylen,hashv) \
594 do { \
595  unsigned _sx_i; \
596  const unsigned char *_hs_key = (const unsigned char*)(key); \
597  hashv = 0; \
598  for (_sx_i=0; _sx_i < keylen; _sx_i++) { \
599  hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
600  } \
601 } while (0)
/* FNV-1a variation (32-bit: offset basis 2166136261, prime 16777619).
 * Parameters as for HASH_BER. Macro parameters are parenthesized at every
 * use so compound argument expressions expand correctly (the original left
 * keylen and hashv bare). */
#define HASH_FNV(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _fn_i;                                                                \
  const unsigned char *_hf_key = (const unsigned char*)(key);                    \
  (hashv) = 2166136261U;                                                         \
  for (_fn_i=0; _fn_i < (unsigned)(keylen); _fn_i++) {                           \
    (hashv) = (hashv) ^ _hf_key[_fn_i];                                          \
    (hashv) = (hashv) * 16777619U;                                               \
  }                                                                              \
} while (0)
613 
/* Jenkins one-at-a-time hash. Parameters as for HASH_BER. Macro parameters
 * are parenthesized at every use so compound argument expressions expand
 * correctly (the original left keylen and hashv bare, so e.g. a keylen of
 * "n & 3" would have misparsed inside the loop condition). */
#define HASH_OAT(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _ho_i;                                                                \
  const unsigned char *_ho_key=(const unsigned char*)(key);                      \
  (hashv) = 0;                                                                   \
  for(_ho_i=0; _ho_i < (unsigned)(keylen); _ho_i++) {                            \
    (hashv) += _ho_key[_ho_i];                                                   \
    (hashv) += ((hashv) << 10);                                                  \
    (hashv) ^= ((hashv) >> 6);                                                   \
  }                                                                              \
  (hashv) += ((hashv) << 3);                                                     \
  (hashv) ^= ((hashv) >> 11);                                                    \
  (hashv) += ((hashv) << 15);                                                    \
} while (0)
628 
/* Bob Jenkins' reversible 3-word mixing step, used by HASH_JEN below.
 * The statement order is significant; a, b, c must be unsigned lvalues. */
629 #define HASH_JEN_MIX(a,b,c) \
630 do { \
631  a -= b; a -= c; a ^= ( c >> 13 ); \
632  b -= c; b -= a; b ^= ( a << 8 ); \
633  c -= a; c -= b; c ^= ( b >> 13 ); \
634  a -= b; a -= c; a ^= ( c >> 12 ); \
635  b -= c; b -= a; b ^= ( a << 16 ); \
636  c -= a; c -= b; c ^= ( b >> 5 ); \
637  a -= b; a -= c; a ^= ( c >> 3 ); \
638  b -= c; b -= a; b ^= ( a << 10 ); \
639  c -= a; c -= b; c ^= ( b >> 15 ); \
640 } while (0)
641 
/* Bob Jenkins' hash (the uthash default): consume the key 12 bytes at a
 * time as three little-endian words, mixing after each block; the switch
 * folds in the 0-11 trailing bytes (cases deliberately fall through). */
642 #define HASH_JEN(key,keylen,hashv) \
643 do { \
644  unsigned _hj_i,_hj_j,_hj_k; \
645  unsigned const char *_hj_key=(unsigned const char*)(key); \
646  hashv = 0xfeedbeefu; \
647  _hj_i = _hj_j = 0x9e3779b9u; \
648  _hj_k = (unsigned)(keylen); \
649  while (_hj_k >= 12U) { \
650  _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
651  + ( (unsigned)_hj_key[2] << 16 ) \
652  + ( (unsigned)_hj_key[3] << 24 ) ); \
653  _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
654  + ( (unsigned)_hj_key[6] << 16 ) \
655  + ( (unsigned)_hj_key[7] << 24 ) ); \
656  hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
657  + ( (unsigned)_hj_key[10] << 16 ) \
658  + ( (unsigned)_hj_key[11] << 24 ) ); \
659  \
660  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
661  \
662  _hj_key += 12; \
663  _hj_k -= 12U; \
664  } \
665  hashv += (unsigned)(keylen); \
666  switch ( _hj_k ) { \
667  case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
668  case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
669  case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
670  case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
671  case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
672  case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
673  case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
674  case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
675  case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
676  case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
677  case 1: _hj_i += _hj_key[0]; \
678  } \
679  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
680 } while (0)
681 
682 /* The Paul Hsieh hash function */
/* get16bits reads two key bytes as a little-endian 16-bit value. On
 * platforms that tolerate unaligned loads it dereferences directly;
 * elsewhere it falls back to byte-wise assembly. */
683 #undef get16bits
684 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
685  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
686 #define get16bits(d) (*((const uint16_t *) (d)))
687 #endif
688 
689 #if !defined (get16bits)
690 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
691  +(uint32_t)(((const uint8_t *)(d))[0]) )
692 #endif
/* Paul Hsieh's SuperFastHash: process the key 4 bytes (two 16-bit reads)
 * per iteration, then fold in the 1-3 remaining bytes and avalanche. */
693 #define HASH_SFH(key,keylen,hashv) \
694 do { \
695  unsigned const char *_sfh_key=(unsigned const char*)(key); \
696  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
697  \
698  unsigned _sfh_rem = _sfh_len & 3U; \
699  _sfh_len >>= 2; \
700  hashv = 0xcafebabeu; \
701  \
702  /* Main loop */ \
703  for (;_sfh_len > 0U; _sfh_len--) { \
704  hashv += get16bits (_sfh_key); \
705  _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
706  hashv = (hashv << 16) ^ _sfh_tmp; \
707  _sfh_key += 2U*sizeof (uint16_t); \
708  hashv += hashv >> 11; \
709  } \
710  \
711  /* Handle end cases */ \
712  switch (_sfh_rem) { \
713  case 3: hashv += get16bits (_sfh_key); \
714  hashv ^= hashv << 16; \
715  hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
716  hashv += hashv >> 11; \
717  break; \
718  case 2: hashv += get16bits (_sfh_key); \
719  hashv ^= hashv << 11; \
720  hashv += hashv >> 17; \
721  break; \
722  case 1: hashv += *_sfh_key; \
723  hashv ^= hashv << 10; \
724  hashv += hashv >> 1; \
725  } \
726  \
727  /* Force "avalanching" of final 127 bits */ \
728  hashv ^= hashv << 3; \
729  hashv += hashv >> 5; \
730  hashv ^= hashv << 4; \
731  hashv += hashv >> 17; \
732  hashv ^= hashv << 25; \
733  hashv += hashv >> 6; \
734 } while (0)
735 
736 #ifdef HASH_USING_NO_STRICT_ALIASING
737 /* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads.
738  * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
739  * MurmurHash uses the faster approach only on CPU's where we know it's safe.
740  *
741  * Note the preprocessor built-in defines can be emitted using:
742  *
743  * gcc -m64 -dM -E - < /dev/null (on gcc)
744  * cc -## a.c (where a.c is a simple test file) (Sun Studio)
745  */
746 #if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
747 #define MUR_GETBLOCK(p,i) p[i]
748 #else /* non intel */
/* On alignment-sensitive CPUs, classify p by its offset within a 4-byte
 * word and reassemble each 32-bit block from two aligned reads. WP rounds
 * p down to a word boundary; the MUR_x_y macros stitch x bytes from the
 * first word with y bytes from the next, in the platform's byte order. */
749 #define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
750 #define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
751 #define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
752 #define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
753 #define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
754 #if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
755 #define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
756 #define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
757 #define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
758 #else /* assume little endian non-intel */
759 #define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
760 #define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
761 #define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
762 #endif
763 #define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
764  (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
765  (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
766  MUR_ONE_THREE(p))))
767 #endif
768 #define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
/* MurmurHash3 finalization mix. */
769 #define MUR_FMIX(_h) \
770 do { \
771  _h ^= _h >> 16; \
772  _h *= 0x85ebca6bu; \
773  _h ^= _h >> 13; \
774  _h ^= _h >> 13; \
775  _h *= 0xc2b2ae35u; \
776  _h ^= _h >> 16; \
777 } while (0)
778 
/* MurmurHash3 (32-bit): body loop over 4-byte blocks, tail switch for the
 * remaining 0-3 bytes (intentional fallthrough), then finalization. */
778 #define HASH_MUR(key,keylen,hashv) \
779 do { \
780  const uint8_t *_mur_data = (const uint8_t*)(key); \
781  const int _mur_nblocks = (int)(keylen) / 4; \
782  uint32_t _mur_h1 = 0xf88D5353u; \
783  uint32_t _mur_c1 = 0xcc9e2d51u; \
784  uint32_t _mur_c2 = 0x1b873593u; \
785  uint32_t _mur_k1 = 0; \
786  const uint8_t *_mur_tail; \
787  const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
788  int _mur_i; \
789  for (_mur_i = -_mur_nblocks; _mur_i != 0; _mur_i++) { \
790  _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
791  _mur_k1 *= _mur_c1; \
792  _mur_k1 = MUR_ROTL32(_mur_k1,15); \
793  _mur_k1 *= _mur_c2; \
794  \
795  _mur_h1 ^= _mur_k1; \
796  _mur_h1 = MUR_ROTL32(_mur_h1,13); \
797  _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
798  } \
799  _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
800  _mur_k1=0; \
801  switch ((keylen) & 3U) { \
802  case 0: break; \
803  case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
804  case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \
805  case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
806  _mur_k1 *= _mur_c1; \
807  _mur_k1 = MUR_ROTL32(_mur_k1,15); \
808  _mur_k1 *= _mur_c2; \
809  _mur_h1 ^= _mur_k1; \
810  } \
811  _mur_h1 ^= (uint32_t)(keylen); \
812  MUR_FMIX(_mur_h1); \
813  hashv = _mur_h1; \
814 } while (0)
815 #endif /* HASH_USING_NO_STRICT_ALIASING */
816 
817 /* iterate over items in a known bucket to find desired item.
 * Cheap comparisons first (stored hash value, then key length) before the
 * byte-wise key comparison; out is left NULL if no match. */
818 #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
819 do { \
820  if ((head).hh_head != NULL) { \
821  DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
822  } else { \
823  (out) = NULL; \
824  } \
825  while ((out) != NULL) { \
826  if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
827  if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \
828  break; \
829  } \
830  } \
831  if ((out)->hh.hh_next != NULL) { \
832  DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
833  } else { \
834  (out) = NULL; \
835  } \
836  } \
837 } while (0)
838 
839 /* add an item to a bucket: push onto the head of the bucket chain, then
 * expand the bucket array if this bucket has grown past its threshold
 * (scaled by expand_mult) and expansion hasn't been disabled. On a
 * non-fatal OOM during expansion the item is removed again. */
840 #define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \
841 do { \
842  UT_hash_bucket *_ha_head = &(head); \
843  _ha_head->count++; \
844  (addhh)->hh_next = _ha_head->hh_head; \
845  (addhh)->hh_prev = NULL; \
846  if (_ha_head->hh_head != NULL) { \
847  _ha_head->hh_head->hh_prev = (addhh); \
848  } \
849  _ha_head->hh_head = (addhh); \
850  if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
851  && !(addhh)->tbl->noexpand) { \
852  HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \
853  IF_HASH_NONFATAL_OOM( \
854  if (oomed) { \
855  HASH_DEL_IN_BKT(head,addhh); \
856  } \
857  ) \
858  } \
859 } while (0)
860 
/* Remove an item from a given bucket: unlink `delhh` from the bucket's
 * doubly-linked chain and decrement the bucket's item count. The handle's
 * own hh_prev/hh_next pointers are not cleared here. */
#define HASH_DEL_IN_BKT(head,delhh)                                              \
do {                                                                             \
  UT_hash_bucket *_hd_head = &(head);                                            \
  _hd_head->count--;                                                             \
  if (_hd_head->hh_head == (delhh)) {                                            \
    _hd_head->hh_head = (delhh)->hh_next;                                        \
  }                                                                              \
  if ((delhh)->hh_prev) {                                                        \
    (delhh)->hh_prev->hh_next = (delhh)->hh_next;                                \
  }                                                                              \
  if ((delhh)->hh_next) {                                                        \
    (delhh)->hh_next->hh_prev = (delhh)->hh_prev;                                \
  }                                                                              \
} while (0)
876 
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 * ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 * On allocation failure nothing is changed; the failure is recorded via
 * HASH_RECORD_OOM (which aborts in fatal-OOM builds, sets `oomed` otherwise).
 */
#define HASH_EXPAND_BUCKETS(hh,tbl,oomed)                                        \
do {                                                                             \
  unsigned _he_bkt;                                                              \
  unsigned _he_bkt_i;                                                            \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt;                                   \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt;                                  \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc(                              \
           2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket));            \
  if (!_he_new_buckets) {                                                        \
    HASH_RECORD_OOM(oomed);                                                      \
  } else {                                                                       \
    uthash_bzero(_he_new_buckets,                                                \
        2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket));               \
    /* ceil(num_items / doubled bucket count), via shift+mask (see above) */     \
    (tbl)->ideal_chain_maxlen =                                                  \
       ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) +                      \
       ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U);    \
    (tbl)->nonideal_items = 0;                                                   \
    /* re-chain every item from each old bucket into the new bucket array */     \
    for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) {           \
      _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head;                             \
      while (_he_thh != NULL) {                                                  \
        _he_hh_nxt = _he_thh->hh_next; /* save: links are rewritten below */     \
        HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt);           \
        _he_newbkt = &(_he_new_buckets[_he_bkt]);                                \
        if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) {                 \
          (tbl)->nonideal_items++;                                               \
          /* overused bucket: raise its future expansion threshold */            \
          _he_newbkt->expand_mult = _he_newbkt->count / (tbl)->ideal_chain_maxlen; \
        }                                                                        \
        /* push-front onto the new bucket's chain */                             \
        _he_thh->hh_prev = NULL;                                                 \
        _he_thh->hh_next = _he_newbkt->hh_head;                                  \
        if (_he_newbkt->hh_head != NULL) {                                       \
          _he_newbkt->hh_head->hh_prev = _he_thh;                                \
        }                                                                        \
        _he_newbkt->hh_head = _he_thh;                                           \
        _he_thh = _he_hh_nxt;                                                    \
      }                                                                          \
    }                                                                            \
    uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
    (tbl)->num_buckets *= 2U;                                                    \
    (tbl)->log2_num_buckets++;                                                   \
    (tbl)->buckets = _he_new_buckets;                                            \
    /* two consecutive ineffective expands => inhibit further expansion */       \
    (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ?   \
        ((tbl)->ineff_expands+1U) : 0U;                                          \
    if ((tbl)->ineff_expands > 1U) {                                             \
      (tbl)->noexpand = 1;                                                       \
      uthash_noexpand_fyi(tbl);                                                  \
    }                                                                            \
    uthash_expand_fyi(tbl);                                                      \
  }                                                                              \
} while (0)
955 
956 
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
/* Sort the items in application (prev/next) order using a bottom-up
 * mergesort over the hash handles: each pass merges adjacent sorted runs
 * of length _hs_insize pairwise, doubling _hs_insize, until a pass does
 * at most one merge. cmpfcn(a,b) follows strcmp conventions (<0, 0, >0).
 * Bucket chains (hh_prev/hh_next) are not touched; only the app-order
 * list, the table tail, and `head` itself are updated. */
#define HASH_SRT(hh,head,cmpfcn)                                                 \
do {                                                                             \
  unsigned _hs_i;                                                                \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize;               \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail;            \
  if (head != NULL) {                                                            \
    _hs_insize = 1;                                                              \
    _hs_looping = 1;                                                             \
    _hs_list = &((head)->hh);                                                    \
    while (_hs_looping != 0U) {                                                  \
      _hs_p = _hs_list;                                                          \
      _hs_list = NULL;                                                           \
      _hs_tail = NULL;                                                           \
      _hs_nmerges = 0;                                                           \
      while (_hs_p != NULL) {                                                    \
        _hs_nmerges++;                                                           \
        /* step _hs_q at most _hs_insize places past _hs_p; _hs_psize is the    \
         * actual length of the first run (may be shorter at list end) */        \
        _hs_q = _hs_p;                                                           \
        _hs_psize = 0;                                                           \
        for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) {                           \
          _hs_psize++;                                                           \
          _hs_q = ((_hs_q->next != NULL) ?                                       \
            HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                   \
          if (_hs_q == NULL) {                                                   \
            break;                                                               \
          }                                                                      \
        }                                                                        \
        _hs_qsize = _hs_insize;                                                  \
        /* merge the run at _hs_p with the run at _hs_q, appending to tail */    \
        while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) {    \
          if (_hs_psize == 0U) {                                                 \
            /* first run exhausted: take from the second */                      \
            _hs_e = _hs_q;                                                       \
            _hs_q = ((_hs_q->next != NULL) ?                                     \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
            _hs_qsize--;                                                         \
          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) {                     \
            /* second run exhausted: take from the first */                      \
            _hs_e = _hs_p;                                                       \
            if (_hs_p != NULL) {                                                 \
              _hs_p = ((_hs_p->next != NULL) ?                                   \
                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
            }                                                                    \
            _hs_psize--;                                                         \
          } else if ((cmpfcn(                                                    \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)),             \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q))              \
                )) <= 0) {                                                       \
            /* p <= q: taking p first keeps the sort stable */                   \
            _hs_e = _hs_p;                                                       \
            if (_hs_p != NULL) {                                                 \
              _hs_p = ((_hs_p->next != NULL) ?                                   \
                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
            }                                                                    \
            _hs_psize--;                                                         \
          } else {                                                               \
            _hs_e = _hs_q;                                                       \
            _hs_q = ((_hs_q->next != NULL) ?                                     \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
            _hs_qsize--;                                                         \
          }                                                                      \
          /* append _hs_e to the merged output list */                           \
          if ( _hs_tail != NULL ) {                                              \
            _hs_tail->next = ((_hs_e != NULL) ?                                  \
              ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL);                       \
          } else {                                                               \
            _hs_list = _hs_e;                                                    \
          }                                                                      \
          if (_hs_e != NULL) {                                                   \
            _hs_e->prev = ((_hs_tail != NULL) ?                                  \
              ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL);                    \
          }                                                                      \
          _hs_tail = _hs_e;                                                      \
        }                                                                        \
        _hs_p = _hs_q; /* continue with the next pair of runs */                 \
      }                                                                          \
      if (_hs_tail != NULL) {                                                    \
        _hs_tail->next = NULL;                                                   \
      }                                                                          \
      if (_hs_nmerges <= 1U) {                                                   \
        /* a pass with <= 1 merge means the list is fully sorted */              \
        _hs_looping = 0;                                                         \
        (head)->hh.tbl->tail = _hs_tail;                                         \
        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list));           \
      }                                                                          \
      _hs_insize *= 2U;                                                          \
    }                                                                            \
    HASH_FSCK(hh, head, "HASH_SRT");                                             \
  }                                                                              \
} while (0)
1044 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure.
 *
 * `cond` is invoked as cond(element-pointer) and selects when nonzero.
 * If `dst` starts out NULL its table is created on the first selection.
 * In nonfatal-OOM builds a failed insertion is rolled back and reported
 * via uthash_nonfatal_oom, then selection continues with the next item. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond)                              \
do {                                                                             \
  unsigned _src_bkt, _dst_bkt;                                                   \
  void *_last_elt = NULL, *_elt;                                                 \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL;                         \
  /* byte offset of the destination hash handle within the element struct */    \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst));                 \
  if ((src) != NULL) {                                                           \
    /* walk every bucket chain of the source hash */                             \
    for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) {    \
      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head;               \
           _src_hh != NULL;                                                      \
           _src_hh = _src_hh->hh_next) {                                         \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh);                         \
        if (cond(_elt)) {                                                        \
          IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; )                             \
          _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho);                 \
          /* share the source's key and cached hash; no copy of the item */      \
          _dst_hh->key = _src_hh->key;                                           \
          _dst_hh->keylen = _src_hh->keylen;                                     \
          _dst_hh->hashv = _src_hh->hashv;                                       \
          _dst_hh->prev = _last_elt;                                             \
          _dst_hh->next = NULL;                                                  \
          if (_last_elt_hh != NULL) {                                            \
            _last_elt_hh->next = _elt;                                           \
          }                                                                      \
          if ((dst) == NULL) {                                                   \
            /* first selected item: it becomes head and gets a fresh table */    \
            DECLTYPE_ASSIGN(dst, _elt);                                          \
            HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed);                             \
            IF_HASH_NONFATAL_OOM(                                                \
              if (_hs_oomed) {                                                   \
                uthash_nonfatal_oom(_elt);                                       \
                (dst) = NULL;                                                    \
                continue;                                                        \
              }                                                                  \
            )                                                                    \
          } else {                                                               \
            _dst_hh->tbl = (dst)->hh_dst.tbl;                                    \
          }                                                                      \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt);      \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \
          (dst)->hh_dst.tbl->num_items++;                                        \
          IF_HASH_NONFATAL_OOM(                                                  \
            if (_hs_oomed) {                                                     \
              /* insertion failed: undo it and report, then keep going */        \
              HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh);                           \
              HASH_DELETE_HH(hh_dst, dst, _dst_hh);                              \
              _dst_hh->tbl = NULL;                                               \
              uthash_nonfatal_oom(_elt);                                         \
              continue;                                                          \
            }                                                                    \
          )                                                                      \
          HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv);                          \
          _last_elt = _elt;                                                      \
          _last_elt_hh = _dst_hh;                                                \
        }                                                                        \
      }                                                                          \
    }                                                                            \
  }                                                                              \
  HASH_FSCK(hh_dst, dst, "HASH_SELECT");                                         \
} while (0)
1107 
/* Dispose of the hash's internal storage -- the bloom filter (if enabled),
 * the bucket array, and the table struct -- then reset `head` to NULL.
 * The elements themselves are NOT freed; they belong to the application.
 * A NULL head is a no-op. */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if ((head) == NULL) {                                                          \
    break; /* nothing allocated for an empty hash */                             \
  }                                                                              \
  HASH_BLOOM_FREE((head)->hh.tbl);                                               \
  uthash_free((head)->hh.tbl->buckets,                                           \
              (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));        \
  uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                            \
  (head) = NULL;                                                                 \
} while (0)
1118 
/* Approximate memory overhead (bytes) of the hash machinery itself:
 * one UT_hash_handle per item, the bucket array, the table struct, and
 * the bloom filter bit-vector length (HASH_BLOOM_BYTELEN). The elements'
 * own storage is not counted. Evaluates to 0 when head is NULL. */
#define HASH_OVERHEAD(hh,head)                                                   \
 (((head) != NULL) ? (                                                           \
 (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) +                 \
          ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) +               \
          sizeof(UT_hash_table) +                                                \
          (HASH_BLOOM_BYTELEN))) : 0U)
1125 
/* Deletion-safe iteration over all items in app order: `tmp` is loaded with
 * the next element before the body runs, so the body may delete (or free)
 * `el` without breaking the traversal. */
#ifdef NO_DECLTYPE
/* no decltype available: assign the next pointer through a char** alias of
 * tmp, avoiding the need to name tmp's type */
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#else
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL));      \
  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#endif
1135 
/* obtain a count of items in the hash -- O(1), reads the table's cached
 * num_items. HASH_CNT is the variant taking an explicit handle name;
 * both evaluate to 0U for a NULL head. */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
1139 
1140 typedef struct UT_hash_bucket {
1142  unsigned count;
1143 
1144  /* expand_mult is normally set to 0. In this situation, the max chain length
1145  * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
1146  * the bucket's chain exceeds this length, bucket expansion is triggered).
1147  * However, setting expand_mult to a non-zero value delays bucket expansion
1148  * (that would be triggered by additions to this particular bucket)
1149  * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
1150  * (The multiplier is simply expand_mult+1). The whole idea of this
1151  * multiplier is to reduce bucket expansions, since they are expensive, in
1152  * situations where we know that a particular bucket tends to be overused.
1153  * It is better to let its chain length grow to a longer yet-still-bounded
1154  * value, than to do an O(n) bucket expansion too often.
1155  */
1156  unsigned expand_mult;
1157 
1159 
/* random signatures used only to find hash tables in external analysis;
 * stored in UT_hash_table.signature (and bloom_sig when HASH_BLOOM is
 * enabled) so debuggers/dump tools can locate live tables in memory */
#define HASH_SIGNATURE 0xa0111fe1u
#define HASH_BLOOM_SIGNATURE 0xb12220f2u
1163 
1164 typedef struct UT_hash_table {
1167  unsigned num_items;
1168  struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
1169  ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
1170 
1171  /* in an ideal situation (all buckets used equally), no bucket would have
1172  * more than ceil(#items/#buckets) items. that's the ideal chain length. */
1174 
1175  /* nonideal_items is the number of items in the hash whose chain position
1176  * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
1177  * hash distribution; reaching them in a chain traversal takes >ideal steps */
1178  unsigned nonideal_items;
1179 
1180  /* ineffective expands occur when a bucket doubling was performed, but
1181  * afterward, more than half the items in the hash had nonideal chain
1182  * positions. If this happens on two consecutive expansions we inhibit any
1183  * further expansion, as it's not helping; this happens when the hash
1184  * function isn't a good fit for the key domain. When expansion is inhibited
1185  * the hash will still work, albeit no longer in constant time. */
1187 
1188  uint32_t signature; /* used only to find hash tables in external analysis */
1189 #ifdef HASH_BLOOM
1190  uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
1191  uint8_t *bloom_bv;
1192  uint8_t bloom_nbits;
1193 #endif
1194 
1196 
1197 typedef struct UT_hash_handle {
1199  void *prev; /* prev element in app order */
1200  void *next; /* next element in app order */
1201  struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
1202  struct UT_hash_handle *hh_next; /* next hh in bucket order */
1203  void *key; /* ptr to enclosing struct's key */
1204  unsigned keylen; /* enclosing struct's key len */
1205  unsigned hashv; /* result of hash-fcn(key) */
1207 
1208 #endif /* UTHASH_H */
int ptrdiff_t
Definition: CPAL.d:3845
unsigned int uint32_t
Definition: stdint.h:80
unsigned char uint8_t
Definition: stdint.h:78
unsigned count
Definition: uthash.h:1142
unsigned expand_mult
Definition: uthash.h:1156
struct UT_hash_handle * hh_head
Definition: uthash.h:1141
void * key
Definition: uthash.h:1203
struct UT_hash_handle * hh_next
Definition: uthash.h:1202
struct UT_hash_handle * hh_prev
Definition: uthash.h:1201
unsigned hashv
Definition: uthash.h:1205
void * next
Definition: uthash.h:1200
struct UT_hash_table * tbl
Definition: uthash.h:1198
void * prev
Definition: uthash.h:1199
unsigned keylen
Definition: uthash.h:1204
struct UT_hash_handle * tail
Definition: uthash.h:1168
unsigned ineff_expands
Definition: uthash.h:1186
unsigned num_buckets
Definition: uthash.h:1166
unsigned ideal_chain_maxlen
Definition: uthash.h:1173
unsigned noexpand
Definition: uthash.h:1186
unsigned num_items
Definition: uthash.h:1167
uint32_t signature
Definition: uthash.h:1188
unsigned nonideal_items
Definition: uthash.h:1178
UT_hash_bucket * buckets
Definition: uthash.h:1165
unsigned log2_num_buckets
Definition: uthash.h:1166
ptrdiff_t hho
Definition: uthash.h:1169
struct UT_hash_bucket UT_hash_bucket
unsigned int uint32_t
Definition: uthash.h:78
struct UT_hash_handle UT_hash_handle
unsigned char uint8_t
Definition: uthash.h:79
struct UT_hash_table UT_hash_table