hash_table.c
#include "hash_table.h"
#include <stdlib.h>
#include <string.h>
#define DEFAULT_SIZE 127
#define DEFAULT_LOAD 0.75
#define DEFAULT_FUNC hash_string
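
/*
Entries use separate chaining: each bucket holds a singly linked list of
entries, and each entry caches its full hash value so lookups can reject
non-matching entries without calling strcmp().
*/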
struct entry {
char *key;
void *value;
unsigned hash;
struct entry *next;
};
struct hash_table {
hash_func_t hash_func;
int bucket_count;
int size;
struct entry **buckets;
int ibucket;
struct entry *ientry;
};
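
/*
Create a table with the given bucket count and hash function. A
bucket_count < 1 selects DEFAULT_SIZE and a null func selects DEFAULT_FUNC.
Returns 0 on allocation failure.
*/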
struct hash_table *hash_table_create(int bucket_count, hash_func_t func)
{
struct hash_table *h;
h = (struct hash_table *) malloc(sizeof(struct hash_table));
if(!h)
return 0;
if(bucket_count < 1)
bucket_count = DEFAULT_SIZE;
if(!func)
func = DEFAULT_FUNC;
	h->size = 0;
	h->ibucket = 0;
	h->ientry = 0;
h->hash_func = func;
h->bucket_count = bucket_count;
h->buckets = (struct entry **) calloc(bucket_count, sizeof(struct entry *));
if(!h->buckets) {
free(h);
return 0;
}
return h;
}
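
/* Free every entry and its duplicated key, leaving every bucket empty. */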
void hash_table_clear(struct hash_table *h)
{
struct entry *e, *f;
int i;
for(i = 0; i < h->bucket_count; i++) {
e = h->buckets[i];
while(e) {
f = e->next;
free(e->key);
free(e);
e = f;
}
}
	for(i = 0; i < h->bucket_count; i++) {
		h->buckets[i] = 0;
	}
	h->size = 0;
}
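
/* Clear the table, then free the bucket array and the table itself. */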
void hash_table_delete(struct hash_table *h)
{
hash_table_clear(h);
free(h->buckets);
free(h);
}
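
/* Return the value stored under key, or 0 if the key is not present. */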
void *hash_table_lookup(struct hash_table *h, const char *key)
{
struct entry *e;
unsigned hash, index;
hash = h->hash_func(key);
index = hash % h->bucket_count;
e = h->buckets[index];
while(e) {
if(hash == e->hash && !strcmp(key, e->key)) {
return e->value;
}
e = e->next;
}
return 0;
}
int hash_table_size(struct hash_table *h)
{
return h->size;
}
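
/*
Grow the table by building a new one with twice as many buckets,
re-inserting every pair, and then taking over the new bucket array.
Returns 1 on success; on allocation failure it returns 0 and leaves
the original table unchanged.
*/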
static int hash_table_double_buckets(struct hash_table *h)
{
struct hash_table *hn = hash_table_create(2 * h->bucket_count, h->hash_func);
if(!hn)
return 0;
/* Move pairs to new hash */
char *key;
void *value;
hash_table_firstkey(h);
while(hash_table_nextkey(h, &key, &value))
if(!hash_table_insert(hn, key, value))
{
hash_table_delete(hn);
return 0;
}
/* Delete all old pairs */
struct entry *e, *f;
int i;
for(i = 0; i < h->bucket_count; i++) {
e = h->buckets[i];
while(e) {
f = e->next;
free(e->key);
free(e);
e = f;
}
}
/* Make the old point to the new */
free(h->buckets);
h->buckets = hn->buckets;
h->bucket_count = hn->bucket_count;
h->size = hn->size;
/* Delete reference to new, so old is safe */
free(hn);
return 1;
}
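
/*
Insert a copy of key mapped to value. If the load factor exceeds
DEFAULT_LOAD, the bucket array is doubled first. Returns 1 on success,
or 0 if the key is already present or allocation fails.
*/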
int hash_table_insert(struct hash_table *h, const char *key, const void *value)
{
struct entry *e;
unsigned hash, index;
if( ((float) h->size / h->bucket_count) > DEFAULT_LOAD )
hash_table_double_buckets(h);
hash = h->hash_func(key);
index = hash % h->bucket_count;
e = h->buckets[index];
while(e) {
if(hash == e->hash && !strcmp(key, e->key))
return 0;
e = e->next;
}
e = (struct entry *) malloc(sizeof(struct entry));
if(!e)
return 0;
e->key = strdup(key);
if(!e->key) {
free(e);
return 0;
}
e->value = (void *) value;
e->hash = hash;
e->next = h->buckets[index];
h->buckets[index] = e;
h->size++;
return 1;
}
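
/* Unlink and free the entry for key, returning its value, or 0 if absent. */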
void *hash_table_remove(struct hash_table *h, const char *key)
{
struct entry *e, *f;
void *value;
unsigned hash, index;
hash = h->hash_func(key);
index = hash % h->bucket_count;
e = h->buckets[index];
f = 0;
while(e) {
if(hash == e->hash && !strcmp(key, e->key)) {
if(f) {
f->next = e->next;
} else {
h->buckets[index] = e->next;
}
value = e->value;
free(e->key);
free(e);
h->size--;
return value;
}
f = e;
e = e->next;
}
return 0;
}
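
/*
Iteration: hash_table_firstkey() positions the internal cursor at the first
occupied bucket, then each call to hash_table_nextkey() yields one key/value
pair and returns 0 once the table is exhausted.
*/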
void hash_table_firstkey(struct hash_table *h)
{
h->ientry = 0;
for(h->ibucket = 0; h->ibucket < h->bucket_count; h->ibucket++) {
h->ientry = h->buckets[h->ibucket];
if(h->ientry)
break;
}
}
int hash_table_nextkey(struct hash_table *h, char **key, void **value)
{
if(h->ientry) {
*key = h->ientry->key;
*value = h->ientry->value;
h->ientry = h->ientry->next;
if(!h->ientry) {
h->ibucket++;
for(; h->ibucket < h->bucket_count; h->ibucket++) {
h->ientry = h->buckets[h->ibucket];
if(h->ientry)
break;
}
}
return 1;
} else {
return 0;
}
}
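
/*
The remainder of this file is Bob Jenkins' lookup hash (attributed in the
comments below), used here as the default string hash function.
*/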
#include <stdint.h>

typedef uint32_t ub4;       /* unsigned 4-byte quantities (fixed width so the hash is 32-bit everywhere) */
typedef unsigned char ub1;  /* unsigned 1-byte quantities */
#define hashsize(n) ((ub4)1<<(n))
#define hashmask(n) (hashsize(n)-1)
/*
--------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.
For every delta with one or two bits set, and the deltas of all three
high bits or all three low bits, whether the original value of a,b,c
is almost all zero or is uniformly distributed,
* If mix() is run forward or backward, at least 32 bits in a,b,c
have at least 1/4 probability of changing.
* If mix() is run forward, every bit of c will change between 1/3 and
2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
mix() was built out of 36 single-cycle latency instructions in a
structure that could support 2x parallelism, like so:
a -= b;
a -= c; x = (c>>13);
b -= c; a ^= x;
b -= a; x = (a<<8);
c -= a; b ^= x;
c -= b; x = (b>>13);
...
Unfortunately, superscalar Pentiums and Sparcs can't take advantage
of that parallelism. They've also turned some of those single-cycle
latency instructions into multi-cycle latency instructions. Still,
this is the fastest good hash I could find. There were about 2^^68
to choose from. I only looked at a billion or so.
--------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
a -= b; a -= c; a ^= (c>>13); \
b -= c; b -= a; b ^= (a<<8); \
c -= a; c -= b; c ^= (b>>13); \
a -= b; a -= c; a ^= (c>>12); \
b -= c; b -= a; b ^= (a<<16); \
c -= a; c -= b; c ^= (b>>5); \
a -= b; a -= c; a ^= (c>>3); \
b -= c; b -= a; b ^= (a<<10); \
c -= a; c -= b; c ^= (b>>15); \
}
/*
--------------------------------------------------------------------
hash() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  len     : the length of the key, counting by bytes
  initval : can be any 4-byte value
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Every 1-bit and 2-bit delta achieves avalanche.
About 6*len+35 instructions.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (ub1 **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);

By Bob Jenkins, 1996.  [email protected].  You may use this code any
way you wish, private, educational, or commercial.  It's free.
See http://burtleburtle.net/bob/hash/evahash.html

Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable.  Do NOT use for cryptographic purposes.
--------------------------------------------------------------------
*/
static ub4 jenkins_hash(const ub1 *k, ub4 length, ub4 initval)
{
register ub4 a, b, c, len; /* Set up the internal state */
len = length;
a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
c = initval; /* the previous hash value */
/*---------------------------------------- handle most of the key */
while(len >= 12) {
a += (k[0] + ((ub4) k[1] << 8) + ((ub4) k[2] << 16) + ((ub4) k[3] << 24));
b += (k[4] + ((ub4) k[5] << 8) + ((ub4) k[6] << 16) + ((ub4) k[7] << 24));
c += (k[8] + ((ub4) k[9] << 8) + ((ub4) k[10] << 16) + ((ub4) k[11] << 24));
mix(a, b, c);
k += 12;
len -= 12;
}
/*------------------------------------- handle the last 11 bytes */
c += length;
switch (len) { /* all the case statements fall through */
case 11:
c += ((ub4) k[10] << 24);
case 10:
c += ((ub4) k[9] << 16);
case 9:
c += ((ub4) k[8] << 8);
/* the first byte of c is reserved for the length */
case 8:
b += ((ub4) k[7] << 24);
case 7:
b += ((ub4) k[6] << 16);
case 6:
b += ((ub4) k[5] << 8);
case 5:
b += k[4];
case 4:
a += ((ub4) k[3] << 24);
case 3:
a += ((ub4) k[2] << 16);
case 2:
a += ((ub4) k[1] << 8);
case 1:
a += k[0];
/* case 0: nothing left to add */
}
mix(a, b, c);
/*-------------------------------------------- report the result */
return c;
}
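
/* Default hash function: apply the Jenkins hash to the bytes of a C string. */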
unsigned hash_string(const char *s)
{
return jenkins_hash((const ub1 *) s, strlen(s), 0);
}
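
/*
Illustrative usage sketch: exercises the API above (create, insert, lookup,
iterate, remove, delete). This is an example only; HASH_TABLE_EXAMPLE is an
arbitrary guard macro chosen here so the sketch does not collide with a real
main(). One possible build command:
    gcc -DHASH_TABLE_EXAMPLE hash_table.c -o hash_table_example
*/
#ifdef HASH_TABLE_EXAMPLE
#include <stdio.h>

int main(void)
{
	/* A bucket count of 0 and a null function select the defaults (127 buckets, hash_string). */
	struct hash_table *h = hash_table_create(0, 0);
	if(!h) {
		fprintf(stderr, "hash_table_create failed\n");
		return 1;
	}

	/* Values are stored as opaque pointers; string literals are used here for simplicity. */
	hash_table_insert(h, "apple", "fruit");
	hash_table_insert(h, "carrot", "vegetable");
	hash_table_insert(h, "basil", "herb");

	/* Lookup returns the stored pointer, or 0 if the key is absent. */
	printf("apple -> %s\n", (const char *) hash_table_lookup(h, "apple"));

	/* Iterate over all pairs in unspecified order. */
	char *key;
	void *value;
	hash_table_firstkey(h);
	while(hash_table_nextkey(h, &key, &value)) {
		printf("%s -> %s\n", key, (const char *) value);
	}

	/* Remove returns the stored value so the caller can release it if it owns it. */
	hash_table_remove(h, "carrot");
	printf("size after remove: %d\n", hash_table_size(h));

	hash_table_delete(h);
	return 0;
}
#endif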