/* Internal stuff, think well before changing this: it's how
   much the weights of two lexicographically contiguous chars
   differ, i.e. (hash_weight('b')-hash_weight('a')) == HASHSTEP.
   One seems to be fine, but the algorithm doesn't depend on it. */
#define HASHSTEP 1
-
/* The smallest _prime_ being HASHSTEP times bigger than a byte,
   that is, the first prime bigger than the maximum hash_weight
   (since the maximum hash weight is going to be the
   "biggest-byte * HASHSTEP"). */
#define HASHSHIFT 257
-
/* Compile-time sanity check: HASHSHIFT must be strictly bigger than
   the largest possible hash weight, HASHSTEP*(CHAR_MAX-CHAR_MIN). */
#if !(HASHSHIFT > (HASHSTEP*(CHAR_MAX-CHAR_MIN)))
#error "No no, I cannot, please make HASHSHIFT a bigger prime !"
#endif
-
/* HASHSIZE doesn't need to be a prime, but we really don't want it
   to be an exact multiple of HASHSHIFT — that would make the
   distribution a LOT worse.  As long as it is not a multiple of
   HASHSHIFT it can be anything. */
#if ((HASHSIZE%HASHSHIFT)==0)
#error "Please set HASHSIZE to something not multiple of HASHSHIFT"
#endif
-
/* What type of integer do we need in our computations?  The largest
   value we need to work with is (HASHSIZE+HASHSHIFT+1).  For memory
   operations we want to keep the tables compact (the cache will work
   better and we will run faster), while for work variables we prefer
   to round up to 'int' where that's the case: on platforms where
   int != short, int arithmetic is often faster than short arithmetic.
   We prefer signed types if they are big enough, since on some
   architectures they are faster than unsigned ones, but we always keep
   the signedness of memory and registers the same, to avoid sign
   conversions that sometimes cost time.  The preprocessor logic below
   sets HASHMEMS to an appropriate integer type for the tables stored
   in memory, and HASHREGS to an appropriate integer type for the work
   registers/variables/return types.  Everything of type HASH???S
   remains internal to this source file, so this stuff lives here and
   not in the header file. */

#undef HASHMEMS
#undef HASHREGS

/* Try the candidate types from the most compact upwards; the first
   branch whose limit check passes defines HASHMEMS, which disables
   every later branch. */
#if ((!defined(HASHMEMS)) && (HASHSIZE < (SHRT_MAX-HASHSHIFT)))
#define HASHMEMS short
#define HASHREGS int
#endif

#if ((!defined(HASHMEMS)) && (HASHSIZE < (USHRT_MAX-HASHSHIFT)))
#define HASHMEMS unsigned short
#define HASHREGS unsigned int
#endif

#if ((!defined(HASHMEMS)) && (HASHSIZE < (INT_MAX-HASHSHIFT)))
#define HASHMEMS int
#define HASHREGS int
#endif

#if ((!defined(HASHMEMS)) && (HASHSIZE < (UINT_MAX-HASHSHIFT)))
#define HASHMEMS unsigned int
#define HASHREGS unsigned int
#endif

#if ((!defined(HASHMEMS)) && (HASHSIZE < (LONG_MAX-HASHSHIFT)))
#define HASHMEMS long
#define HASHREGS long
#endif

#if ((!defined(HASHMEMS)) && (HASHSIZE < (ULONG_MAX-HASHSHIFT)))
#define HASHMEMS unsigned long
#define HASHREGS unsigned long
#endif

/* Nothing fit: HASHSIZE is absurdly large for this platform. */
#if (!defined(HASHMEMS))
#error "Uh oh... I have a problem, do you want a 16GB hash table ? !"
#endif
-
/* Now we are sure that HASHMEMS and HASHREGS can contain the following:
   the number of entries of the precomputed hash map table. */
#define HASHMAPSIZE (HASHSIZE+HASHSHIFT+1)
-
/* Static memory structures */

/* We need a first function that, given an integer h between 1 and
   HASHSIZE+HASHSHIFT, returns ( (h * HASHSHIFT) % HASHSIZE ).
   We precompute ("map") this function in this table. */
static HASHMEMS hash_map[HASHMAPSIZE];
-
-/* Then we need a second function that "maps" a char to its weitgh,
- changed to a table this one too, with this macro we can use a char
- as index and not care if it is signed or not, no.. this will not
- cause an addition to take place at each access, trust me, the
- optimizer takes it out of the actual code and passes "label+shift"
- to the linker, and the linker does the addition :) */
-static HASHMEMS hash_weight_table[CHAR_MAX - CHAR_MIN + 1];
-#define hash_weight(ch) hash_weight_table[ch-CHAR_MIN]
-
/* The actual hash tables; both MUST be of the same HASHSIZE.  Variable-
   size tables could be supported, but then the rehash routine would
   also have to rebuild the transformation maps; I kept the tables of
   equal size so that I can use one hash function and one transformation map