Update to revision 136 of MurmurHash3 (marked as final)

master
Vaizard institute 2011-07-25 13:22:25 +02:00
parent 16dad3b433
commit 9528c230b5
5 changed files with 280 additions and 427 deletions

(changelog)

@@ -2,3 +2,6 @@
 (v0.1)
 * Initial version
 
+07-JUL-2011 16:45 CET Jaromir Capik <tavvva@email.cz>
+(v0.2)
+* Update to revision 136 of MurmurHash3 (marked as final)

MurmurHash3.cpp

@@ -1,7 +1,7 @@
 /*
  * The MIT License
  *
- * Copyright (c) 2010-2011 tanjent <tanj...@gmail.com>, http://code.google.com/p/smhasher/
+ * Copyright (c) 2010-2011 Austin Appleby (aapleby@gmail.com)
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -23,28 +23,74 @@
  *
  */
-#include "MurmurHash3.hpp"
 //-----------------------------------------------------------------------------
 // MurmurHash3 was written by Austin Appleby, and is placed in the public
 // domain. The author hereby disclaims copyright to this source code.
-#define _rotl(v, n) (uint32_t((v) << ((n)&0x1F)) | uint32_t((v) >> (32 - ((n)&0x1F))))
-#define _rotr(v, n) (uint32_t((v) >> ((n)&0x1F)) | uint32_t((v) << (32 - ((n)&0x1F))))
-#define _rotl64(v, n) (uint64_t((v) << ((n)&0x3F)) | uint64_t((v) >> (64 - ((n)&0x3F))))
-#define _rotr64(v, n) (uint64_t((v) >> ((n)&0x3F)) | uint64_t((v) << (64 - ((n)&0x3F))))
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+#include "MurmurHash3.h"
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+// Microsoft Visual Studio
+#if defined(_MSC_VER)
+#define FORCE_INLINE __forceinline
+#include <stdlib.h>
+#define ROTL32(x,y) _rotl(x,y)
+#define ROTL64(x,y) _rotl64(x,y)
+#define BIG_CONSTANT(x) (x)
+// Other compilers
+#else // defined(_MSC_VER)
+#define FORCE_INLINE __attribute__((always_inline))
+inline uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+  return (x << r) | (x >> (32 - r));
+}
+inline uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+  return (x << r) | (x >> (64 - r));
+}
+#define ROTL32(x,y) rotl32(x,y)
+#define ROTL64(x,y) rotl64(x,y)
+#define BIG_CONSTANT(x) (x##LLU)
+#endif // !defined(_MSC_VER)
 //-----------------------------------------------------------------------------
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
-inline uint32_t getblock (const uint32_t * p, int i)
+FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
 {
   return p[i];
 }
-//----------
+FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
+{
+  return p[i];
+}
+//-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
-// avalanches all bits to within 0.25% bias
-inline uint32_t fmix32 (uint32_t h)
+FORCE_INLINE uint32_t fmix ( uint32_t h )
 {
   h ^= h >> 16;
   h *= 0x85ebca6b;
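
Not part of the diff: the new platform layer replaces the hand-rolled _rotl macros with ROTL32/ROTL64, which map to MSVC's rotate intrinsics or to shift-or code that compilers reduce to a single rotate instruction, and getblock() stays the one hook where endian-swapping or strictly aligned reads would be handled. A sketch of an endianness-safe block read under a GCC/Clang toolchain; getblock32_safe and the MURMUR_BIG_ENDIAN flag are illustrative names, not code from this commit:

#include <stdint.h>
#include <string.h>

// Illustrative variant of getblock() for big-endian or alignment-strict
// platforms: memcpy avoids unaligned dereferences, and the byte swap
// normalizes input order so output matches the little-endian reference.
static inline uint32_t getblock32_safe ( const uint8_t * p, int i )
{
  uint32_t k;
  memcpy(&k, p + i * 4, sizeof(k));
#if defined(MURMUR_BIG_ENDIAN)
  k = __builtin_bswap32(k);
#endif
  return k;
}
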
@@ -55,60 +101,63 @@ inline uint32_t fmix32 (uint32_t h)
   return h;
 }
 //-----------------------------------------------------------------------------
-inline void bmix32 (uint32_t & h1, uint32_t & k1, uint32_t & c1,
-                    uint32_t & c2)
-{
-  k1 *= c1;
-  k1 = _rotl (k1, 11);
-  k1 *= c2;
-  h1 ^= k1;
-  h1 = h1 * 3 + 0x52dce729;
-  c1 = c1 * 5 + 0x7b7d159c;
-  c2 = c2 * 5 + 0x6bce6396;
-}
-//----------
-void MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out)
-{
-  const uint8_t *data = (const uint8_t *) key;
+FORCE_INLINE uint64_t fmix ( uint64_t k )
+{
+  k ^= k >> 33;
+  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
+  k ^= k >> 33;
+  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
+  k ^= k >> 33;
+  return k;
+}
+//-----------------------------------------------------------------------------
+void MurmurHash3_x86_32 ( const void * key, int len,
+                          uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
   const int nblocks = len / 4;
-  uint32_t h1 = 0x971e137b ^ seed;
-  uint32_t c1 = (uint32_t) 0x95543787;
-  uint32_t c2 = (uint32_t) 0x2ad7eb25;
+  uint32_t h1 = seed;
+  uint32_t c1 = 0xcc9e2d51;
+  uint32_t c2 = 0x1b873593;
   //----------
   // body
-  const uint32_t *blocks = (const uint32_t *) (data + nblocks * 4);
-  for (int i = -nblocks; i; i++) {
-    uint32_t k1 = getblock (blocks, i);
-    bmix32 (h1, k1, c1, c2);
+  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+  for(int i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock(blocks,i);
+    k1 *= c1;
+    k1 = ROTL32(k1,15);
+    k1 *= c2;
+    h1 ^= k1;
+    h1 = ROTL32(h1,13);
+    h1 = h1*5+0xe6546b64;
   }
   //----------
   // tail
-  const uint8_t *tail = (const uint8_t *) (data + nblocks * 4);
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
   uint32_t k1 = 0;
-  switch (len & 3) {
-  case 3:
-    k1 ^= tail[2] << 16;
-  case 2:
-    k1 ^= tail[1] << 8;
-  case 1:
-    k1 ^= tail[0];
-    bmix32 (h1, k1, c1, c2);
+  switch(len & 3)
+  {
+  case 3: k1 ^= tail[2] << 16;
+  case 2: k1 ^= tail[1] << 8;
+  case 1: k1 ^= tail[0];
+          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
   };
   //----------
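
Not part of the diff: the tail switch above falls through deliberately — each case XORs one trailing byte into k1, and the mix runs once at case 1 — while fmix() is what makes those short keys avalanche. A standalone sketch that spot-checks the avalanche property of the 32-bit finalizer; the full fmix body (partly folded out of the hunk above) is reproduced from the upstream source, and __builtin_popcount assumes GCC/Clang:

#include <stdint.h>
#include <stdio.h>

// 32-bit finalizer as in revision 136.
static uint32_t fmix32_demo ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;
  return h;
}

int main ()
{
  // Flipping a single input bit should flip roughly half of the 32 output bits.
  uint32_t a = fmix32_demo(0x12345678);
  uint32_t b = fmix32_demo(0x12345678 ^ 1);
  printf("output bits flipped: %d of 32\n", __builtin_popcount(a ^ b));
  return 0;
}
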
@@ -116,420 +165,196 @@ void MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out)
   h1 ^= len;
-  h1 = fmix32 (h1);
-  *(uint32_t *) out = h1;
+  h1 = fmix(h1);
+  *(uint32_t*)out = h1;
 }
 //-----------------------------------------------------------------------------
-inline void bmix32 (uint32_t & h1, uint32_t & h2, uint32_t & k1,
-                    uint32_t & k2, uint32_t & c1, uint32_t & c2)
-{
-  k1 *= c1;
-  k1 = _rotl (k1, 11);
-  k1 *= c2;
-  h1 ^= k1;
-  h1 += h2;
-  h2 = _rotl (h2, 17);
-  k2 *= c2;
-  k2 = _rotl (k2, 11);
-  k2 *= c1;
-  h2 ^= k2;
-  h2 += h1;
-  h1 = h1 * 3 + 0x52dce729;
-  h2 = h2 * 3 + 0x38495ab5;
-  c1 = c1 * 5 + 0x7b7d159c;
-  c2 = c2 * 5 + 0x6bce6396;
-}
-//----------
-void MurmurHash3_x86_64 (const void *key, const int len, const uint32_t seed,
-                         void *out)
-{
-  const uint8_t *data = (const uint8_t *) key;
-  const int nblocks = len / 8;
-  uint32_t h1 = 0x8de1c3ac ^ seed;
-  uint32_t h2 = 0xbab98226 ^ seed;
-  uint32_t c1 = (uint32_t) 0x95543787;
-  uint32_t c2 = (uint32_t) 0x2ad7eb25;
-  //----------
-  // body
-  const uint32_t *blocks = (const uint32_t *) (data + nblocks * 8);
-  for (int i = -nblocks; i; i++) {
-    uint32_t k1 = getblock (blocks, i * 2 + 0);
-    uint32_t k2 = getblock (blocks, i * 2 + 1);
-    bmix32 (h1, h2, k1, k2, c1, c2);
-  }
-  //----------
-  // tail
-  const uint8_t *tail = (const uint8_t *) (data + nblocks * 8);
-  uint32_t k1 = 0;
-  uint32_t k2 = 0;
-  switch (len & 7) {
-  case 7:
-    k2 ^= tail[6] << 16;
-  case 6:
-    k2 ^= tail[5] << 8;
-  case 5:
-    k2 ^= tail[4] << 0;
-  case 4:
-    k1 ^= tail[3] << 24;
-  case 3:
-    k1 ^= tail[2] << 16;
-  case 2:
-    k1 ^= tail[1] << 8;
-  case 1:
-    k1 ^= tail[0] << 0;
-    bmix32 (h1, h2, k1, k2, c1, c2);
-  };
-  //----------
-  // finalization
-  h2 ^= len;
-  h1 += h2;
-  h2 += h1;
-  h1 = fmix32 (h1);
-  h2 = fmix32 (h2);
-  h1 += h2;
-  h2 += h1;
-  ((uint32_t *) out)[0] = h1;
-  ((uint32_t *) out)[1] = h2;
-}
-//-----------------------------------------------------------------------------
-// This mix is large enough that VC++ refuses to inline it unless we use
-// __forceinline. It's also not all that fast due to register spillage.
-inline void bmix32 (uint32_t & h1, uint32_t & h2, uint32_t & h3,
-                    uint32_t & h4, uint32_t & k1, uint32_t & k2,
-                    uint32_t & k3, uint32_t & k4, uint32_t & c1,
-                    uint32_t & c2)
-{
-  k1 *= c1;
-  k1 = _rotl (k1, 11);
-  k1 *= c2;
-  h1 ^= k1;
-  h1 += h2;
-  h1 += h3;
-  h1 += h4;
-  h1 = _rotl (h1, 17);
-  k2 *= c2;
-  k2 = _rotl (k2, 11);
-  k2 *= c1;
-  h2 ^= k2;
-  h2 += h1;
-  h1 = h1 * 3 + 0x52dce729;
-  h2 = h2 * 3 + 0x38495ab5;
-  c1 = c1 * 5 + 0x7b7d159c;
-  c2 = c2 * 5 + 0x6bce6396;
-  k3 *= c1;
-  k3 = _rotl (k3, 11);
-  k3 *= c2;
-  h3 ^= k3;
-  h3 += h1;
-  k4 *= c2;
-  k4 = _rotl (k4, 11);
-  k4 *= c1;
-  h4 ^= k4;
-  h4 += h1;
-  h3 = h3 * 3 + 0x52dce729;
-  h4 = h4 * 3 + 0x38495ab5;
-  c1 = c1 * 5 + 0x7b7d159c;
-  c2 = c2 * 5 + 0x6bce6396;
-}
-//----------
-void MurmurHash3_x86_128 (const void *key, const int len, const uint32_t seed,
-                          void *out)
-{
-  const uint8_t *data = (const uint8_t *) key;
+void MurmurHash3_x86_128 ( const void * key, const int len,
+                           uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
   const int nblocks = len / 16;
-  uint32_t h1 = 0x8de1c3ac ^ seed;
-  uint32_t h2 = 0xbab98226 ^ seed;
-  uint32_t h3 = 0xfcba5b2d ^ seed;
-  uint32_t h4 = 0x32452e3e ^ seed;
-  uint32_t c1 = (uint32_t) 0x95543787;
-  uint32_t c2 = (uint32_t) 0x2ad7eb25;
+  uint32_t h1 = seed;
+  uint32_t h2 = seed;
+  uint32_t h3 = seed;
+  uint32_t h4 = seed;
+  uint32_t c1 = 0x239b961b;
+  uint32_t c2 = 0xab0e9789;
+  uint32_t c3 = 0x38b34ae5;
+  uint32_t c4 = 0xa1e38b93;
   //----------
   // body
-  const uint32_t *blocks = (const uint32_t *) (data);
-  for (int i = 0; i < nblocks; i++) {
-    uint32_t k1 = getblock (blocks, i * 4 + 0);
-    uint32_t k2 = getblock (blocks, i * 4 + 1);
-    uint32_t k3 = getblock (blocks, i * 4 + 2);
-    uint32_t k4 = getblock (blocks, i * 4 + 3);
-    bmix32 (h1, h2, h3, h4, k1, k2, k3, k4, c1, c2);
+  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
+  for(int i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock(blocks,i*4+0);
+    uint32_t k2 = getblock(blocks,i*4+1);
+    uint32_t k3 = getblock(blocks,i*4+2);
+    uint32_t k4 = getblock(blocks,i*4+3);
+    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
+    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
+    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
+    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
   }
   //----------
   // tail
-  const uint8_t *tail = (const uint8_t *) (data + nblocks * 16);
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
   uint32_t k1 = 0;
   uint32_t k2 = 0;
   uint32_t k3 = 0;
   uint32_t k4 = 0;
-  switch (len & 15) {
-  case 15:
-    k4 ^= tail[14] << 16;
-  case 14:
-    k4 ^= tail[13] << 8;
-  case 13:
-    k4 ^= tail[12] << 0;
-  case 12:
-    k3 ^= tail[11] << 24;
-  case 11:
-    k3 ^= tail[10] << 16;
-  case 10:
-    k3 ^= tail[9] << 8;
-  case 9:
-    k3 ^= tail[8] << 0;
-  case 8:
-    k2 ^= tail[7] << 24;
-  case 7:
-    k2 ^= tail[6] << 16;
-  case 6:
-    k2 ^= tail[5] << 8;
-  case 5:
-    k2 ^= tail[4] << 0;
-  case 4:
-    k1 ^= tail[3] << 24;
-  case 3:
-    k1 ^= tail[2] << 16;
-  case 2:
-    k1 ^= tail[1] << 8;
-  case 1:
-    k1 ^= tail[0] << 0;
-    bmix32 (h1, h2, h3, h4, k1, k2, k3, k4, c1, c2);
+  switch(len & 15)
+  {
+  case 15: k4 ^= tail[14] << 16;
+  case 14: k4 ^= tail[13] << 8;
+  case 13: k4 ^= tail[12] << 0;
+           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+  case 12: k3 ^= tail[11] << 24;
+  case 11: k3 ^= tail[10] << 16;
+  case 10: k3 ^= tail[ 9] << 8;
+  case  9: k3 ^= tail[ 8] << 0;
+           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+  case  8: k2 ^= tail[ 7] << 24;
+  case  7: k2 ^= tail[ 6] << 16;
+  case  6: k2 ^= tail[ 5] << 8;
+  case  5: k2 ^= tail[ 4] << 0;
+           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+  case  4: k1 ^= tail[ 3] << 24;
+  case  3: k1 ^= tail[ 2] << 16;
+  case  2: k1 ^= tail[ 1] << 8;
+  case  1: k1 ^= tail[ 0] << 0;
+           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
   };
   //----------
   // finalization
-  h4 ^= len;
-  h1 += h2;
-  h1 += h3;
-  h1 += h4;
-  h2 += h1;
-  h3 += h1;
-  h4 += h1;
-  h1 = fmix32 (h1);
-  h2 = fmix32 (h2);
-  h3 = fmix32 (h3);
-  h4 = fmix32 (h4);
-  h1 += h2;
-  h1 += h3;
-  h1 += h4;
-  h2 += h1;
-  h3 += h1;
-  h4 += h1;
-  ((uint32_t *) out)[0] = h1;
-  ((uint32_t *) out)[1] = h2;
-  ((uint32_t *) out)[2] = h3;
-  ((uint32_t *) out)[3] = h4;
+  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+  h1 += h2; h1 += h3; h1 += h4;
+  h2 += h1; h3 += h1; h4 += h1;
+  h1 = fmix(h1);
+  h2 = fmix(h2);
+  h3 = fmix(h3);
+  h4 = fmix(h4);
+  h1 += h2; h1 += h3; h1 += h4;
+  h2 += h1; h3 += h1; h4 += h1;
+  ((uint32_t*)out)[0] = h1;
+  ((uint32_t*)out)[1] = h2;
+  ((uint32_t*)out)[2] = h3;
+  ((uint32_t*)out)[3] = h4;
 }
 //-----------------------------------------------------------------------------
-// Block read - if your platform needs to do endian-swapping or can only
-// handle aligned reads, do the conversion here
-inline uint64_t getblock (const uint64_t * p, int i)
-{
-  return p[i];
-}
-//----------
-// Block mix - combine the key bits with the hash bits and scramble everything
-inline void bmix64 (uint64_t & h1, uint64_t & h2, uint64_t & k1,
-                    uint64_t & k2, uint64_t & c1, uint64_t & c2)
-{
-  k1 *= c1;
-  k1 = _rotl64 (k1, 23);
-  k1 *= c2;
-  h1 ^= k1;
-  h1 += h2;
-  h2 = _rotl64 (h2, 41);
-  k2 *= c2;
-  k2 = _rotl64 (k2, 23);
-  k2 *= c1;
-  h2 ^= k2;
-  h2 += h1;
-  h1 = h1 * 3 + 0x52dce729;
-  h2 = h2 * 3 + 0x38495ab5;
-  c1 = c1 * 5 + 0x7b7d159c;
-  c2 = c2 * 5 + 0x6bce6396;
-}
-//----------
-// Finalization mix - avalanches all bits to within 0.05% bias
-inline uint64_t fmix64 (uint64_t k)
-{
-  k ^= k >> 33;
-  k *= 0xff51afd7ed558ccd;
-  k ^= k >> 33;
-  k *= 0xc4ceb9fe1a85ec53;
-  k ^= k >> 33;
-  return k;
-}
-//----------
-void MurmurHash3_x64_128 (const void *key, const int len, const uint32_t seed,
-                          void *out)
-{
-  const uint8_t *data = (const uint8_t *) key;
+void MurmurHash3_x64_128 ( const void * key, const int len,
+                           const uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
   const int nblocks = len / 16;
-  uint64_t h1 = 0x9368e53c2f6af274 ^ seed;
-  uint64_t h2 = 0x586dcd208f7cd3fd ^ seed;
-  uint64_t c1 = 0x87c37b91114253d5;
-  uint64_t c2 = 0x4cf5ad432745937f;
+  uint64_t h1 = seed;
+  uint64_t h2 = seed;
+  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
+  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
   //----------
   // body
-  const uint64_t *blocks = (const uint64_t *) (data);
-  for (int i = 0; i < nblocks; i++) {
-    uint64_t k1 = getblock (blocks, i * 2 + 0);
-    uint64_t k2 = getblock (blocks, i * 2 + 1);
-    bmix64 (h1, h2, k1, k2, c1, c2);
+  const uint64_t * blocks = (const uint64_t *)(data);
+  for(int i = 0; i < nblocks; i++)
+  {
+    uint64_t k1 = getblock(blocks,i*2+0);
+    uint64_t k2 = getblock(blocks,i*2+1);
+    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
+    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
   }
   //----------
   // tail
-  const uint8_t *tail = (const uint8_t *) (data + nblocks * 16);
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
   uint64_t k1 = 0;
   uint64_t k2 = 0;
-  switch (len & 15) {
-  case 15:
-    k2 ^= uint64_t (tail[14]) << 48;
-  case 14:
-    k2 ^= uint64_t (tail[13]) << 40;
-  case 13:
-    k2 ^= uint64_t (tail[12]) << 32;
-  case 12:
-    k2 ^= uint64_t (tail[11]) << 24;
-  case 11:
-    k2 ^= uint64_t (tail[10]) << 16;
-  case 10:
-    k2 ^= uint64_t (tail[9]) << 8;
-  case 9:
-    k2 ^= uint64_t (tail[8]) << 0;
-  case 8:
-    k1 ^= uint64_t (tail[7]) << 56;
-  case 7:
-    k1 ^= uint64_t (tail[6]) << 48;
-  case 6:
-    k1 ^= uint64_t (tail[5]) << 40;
-  case 5:
-    k1 ^= uint64_t (tail[4]) << 32;
-  case 4:
-    k1 ^= uint64_t (tail[3]) << 24;
-  case 3:
-    k1 ^= uint64_t (tail[2]) << 16;
-  case 2:
-    k1 ^= uint64_t (tail[1]) << 8;
-  case 1:
-    k1 ^= uint64_t (tail[0]) << 0;
-    bmix64 (h1, h2, k1, k2, c1, c2);
+  switch(len & 15)
+  {
+  case 15: k2 ^= uint64_t(tail[14]) << 48;
+  case 14: k2 ^= uint64_t(tail[13]) << 40;
+  case 13: k2 ^= uint64_t(tail[12]) << 32;
+  case 12: k2 ^= uint64_t(tail[11]) << 24;
+  case 11: k2 ^= uint64_t(tail[10]) << 16;
+  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
+  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
+           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
+  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
+  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
+  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
+  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
+  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
+  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
+  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
+           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
   };
   //----------
   // finalization
-  h2 ^= len;
+  h1 ^= len; h2 ^= len;
   h1 += h2;
   h2 += h1;
-  h1 = fmix64 (h1);
-  h2 = fmix64 (h2);
+  h1 = fmix(h1);
+  h2 = fmix(h2);
   h1 += h2;
   h2 += h1;
-  ((uint64_t *) out)[0] = h1;
-  ((uint64_t *) out)[1] = h2;
+  ((uint64_t*)out)[0] = h1;
+  ((uint64_t*)out)[1] = h2;
 }
 //-----------------------------------------------------------------------------
-// If we need a smaller hash value, it's faster to just use a portion of the
-// 128-bit hash
-void MurmurHash3_x64_32 (const void *key, int len, uint32_t seed, void *out)
-{
-  uint32_t temp[4];
-  MurmurHash3_x64_128 (key, len, seed, temp);
-  *(uint32_t *) out = temp[0];
-}
-//----------
-void MurmurHash3_x64_64 (const void *key, int len, uint32_t seed, void *out)
-{
-  uint64_t temp[2];
-  MurmurHash3_x64_128 (key, len, seed, temp);
-  *(uint64_t *) out = temp[0];
-}
 //-----------------------------------------------------------------------------
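
Not part of the diff: the x64_32 and x64_64 wrappers are removed; as their deleted comment said, a smaller hash value is just a portion of the 128-bit hash. A sketch of that pattern on the caller's side — murmur3_x64_64 is a hypothetical helper name, not part of this commit:

#include <stdint.h>
#include "MurmurHash3.h"

// Take the low 64 bits of the 128-bit x64 hash.
static uint64_t murmur3_x64_64 ( const void * key, int len, uint32_t seed )
{
  uint64_t tmp[2];
  MurmurHash3_x64_128(key, len, seed, tmp);
  return tmp[0];
}
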

MurmurHash3.h (new file, mode 100644, 62 lines)

@@ -0,0 +1,62 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2010-2011 Austin Appleby (aapleby@gmail.com)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+// Microsoft Visual Studio
+#if defined(_MSC_VER)
+typedef unsigned char uint8_t;
+typedef unsigned long uint32_t;
+typedef unsigned __int64 uint64_t;
+// Other compilers
+#else // !defined(_MSC_VER)
+#include <stdint.h>
+#endif
+//-----------------------------------------------------------------------------
+void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out );
+void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
+void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
+//-----------------------------------------------------------------------------
+#endif // _MURMURHASH3_H_
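
Usage sketch, not part of the commit: hashing a string through the three exported entry points. Per the note at the top of MurmurHash3.cpp, the x86 and x64 128-bit variants give different digests for the same input; the seed (42 here) is arbitrary:

#include <stdio.h>
#include <string.h>
#include "MurmurHash3.h"

int main ()
{
  const char * key = "hello";
  int len = (int)strlen(key);
  uint32_t h32;
  uint32_t h128_x86[4];
  uint64_t h128_x64[2];

  MurmurHash3_x86_32 (key, len, 42, &h32);
  MurmurHash3_x86_128(key, len, 42, h128_x86);
  MurmurHash3_x64_128(key, len, 42, h128_x64);

  printf("x86_32 : %08x\n", h32);
  printf("x86_128: %08x%08x%08x%08x\n",
         h128_x86[0], h128_x86[1], h128_x86[2], h128_x86[3]);
  printf("x64_128: %016llx%016llx\n",
         (unsigned long long)h128_x64[0], (unsigned long long)h128_x64[1]);
  return 0;
}
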

MurmurHash3.hpp (deleted file)

@@ -1,39 +0,0 @@
-/*
- * MurmurHash3CLI - command line interface for MurmurHash3
- * Copyright (C) 2011 Jaromir Capik <tavvva@email.cz>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-#ifndef __MURMURHASH3_HPP__
-#define __MURMURHASH3_HPP__
-#include <stdint.h>
-#define MURMURHASH3_OUTPUT_LENGTH 16
-typedef uint8_t MurmurHash3_output_t[MURMURHASH3_OUTPUT_LENGTH];
-typedef uint32_t MurmurHash3_seed_t;
-void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x86_64 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x64_32 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x64_64 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
-#endif

(PHP extension source)

@@ -26,7 +26,9 @@ extern "C" {
 #include "php_murmurhash3.h"
 }
-#include "MurmurHash3.hpp"
+#include "MurmurHash3.h"
+
+#define MURMURHASH3_OUTPUT_LENGTH 16
 static function_entry murmurhash3_functions[] = {