diff --git a/README.md b/README.md
index 0984b8e..993291a 100644
--- a/README.md
+++ b/README.md
@@ -71,7 +71,15 @@ After programming, you will only need to setup your Wifi and BTC address.
 1. Setup your Wifi Network
 1. Add your BTCaddress
 
-Optional you can select other pool:
+Recommended low-difficulty share pools:
+
+| Pool URL | Port | URL | Status |
+|--- |--- |--- |--- |
+| public-pool.airdns.org | 21496 | https://public-pool.airdns.org:37273/ | Check your stats. Supports the open source miners Discord group |
+| nerdminers.org | | | Currently pointing to the Open Source Solo Bitcoin Mining Pool |
+| nerdminer.io | 3333 | https://nerdminer.io | Maintained by CHMEX |
+
+Other standard pools (not compatible with low-difficulty shares):
 
 | Pool URL | Port | URL |
 |--- |--- |--- |
diff --git a/src/NerdMinerV2.ino.cpp b/src/NerdMinerV2.ino.cpp
index 9ccd5a7..8c14847 100644
--- a/src/NerdMinerV2.ino.cpp
+++ b/src/NerdMinerV2.ino.cpp
@@ -16,7 +16,7 @@
 #include "mining.h"
 #include "monitor.h"
 
-#define CURRENT_VERSION "V1.5.2"
+#define CURRENT_VERSION "V1.6.0"
 
 //3 seconds WDT
 #define WDT_TIMEOUT 3
diff --git a/src/ShaTests/customSHA256.cpp b/src/ShaTests/customSHA256.cpp
deleted file mode 100644
index b123945..0000000
--- a/src/ShaTests/customSHA256.cpp
+++ /dev/null
@@ -1,222 +0,0 @@
-#include "customSHA256.h"
-
-#define TOTAL_LEN_LEN 8
-
-/*
- * Comments from pseudo-code at https://en.wikipedia.org/wiki/SHA-2 are reproduced here.
- * When useful for clarification, portions of the pseudo-code are reproduced here too.
- */
-
-/*
- * @brief Rotate a 32-bit value by a number of bits to the right.
- * @param value The value to be rotated.
- * @param count The number of bits to rotate by.
- * @return The rotated value.
- */
-static inline uint32_t right_rot(uint32_t value, unsigned int count)
-{
-    /*
-     * Defined behaviour in standard C for all count where 0 < count < 32, which is what we need here.
-     */
-    return value >> count | value << (32 - count);
-}
-
-/*
- * @brief Update a hash value under calculation with a new chunk of data.
- * @param h Pointer to the first hash item, of a total of eight.
- * @param p Pointer to the chunk data, which has a standard length.
- *
- * @note This is the SHA-256 work horse.
- */
-static inline void consume_chunk(uint32_t *h, const uint8_t *p)
-{
-    unsigned i, j;
-    uint32_t ah[8];
-
-    /* Initialize working variables to current hash value: */
-    for (i = 0; i < 8; i++)
-        ah[i] = h[i];
-
-    /*
-     * The w-array is really w[64], but since we only need 16 of them at a time, we save stack by
-     * calculating 16 at a time.
-     *
-     * This optimization was not there initially and the rest of the comments about w[64] are kept in their
-     * initial state.
- */ - - /* - * create a 64-entry message schedule array w[0..63] of 32-bit words (The initial values in w[0..63] - * don't matter, so many implementations zero them here) copy chunk into first 16 words w[0..15] of the - * message schedule array - */ - uint32_t w[16]; - - /* Compression function main loop: */ - for (i = 0; i < 4; i++) { - for (j = 0; j < 16; j++) { - if (i == 0) { - w[j] = - (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 | (uint32_t)p[2] << 8 | (uint32_t)p[3]; - p += 4; - } else { - /* Extend the first 16 words into the remaining 48 words w[16..63] of the - * message schedule array: */ - const uint32_t s0 = right_rot(w[(j + 1) & 0xf], 7) ^ right_rot(w[(j + 1) & 0xf], 18) ^ - (w[(j + 1) & 0xf] >> 3); - const uint32_t s1 = right_rot(w[(j + 14) & 0xf], 17) ^ - right_rot(w[(j + 14) & 0xf], 19) ^ (w[(j + 14) & 0xf] >> 10); - w[j] = w[j] + s0 + w[(j + 9) & 0xf] + s1; - } - const uint32_t s1 = right_rot(ah[4], 6) ^ right_rot(ah[4], 11) ^ right_rot(ah[4], 25); - const uint32_t ch = (ah[4] & ah[5]) ^ (~ah[4] & ah[6]); - - /* - * Initialize array of round constants: - * (first 32 bits of the fractional parts of the cube roots of the first 64 primes 2..311): - */ - static const uint32_t k[] = { - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, - 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, - 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, - 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, - 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, - 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, - 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, - 0xc67178f2}; - - const uint32_t temp1 = ah[7] + s1 + ch + k[i << 4 | j] + w[j]; - const uint32_t s0 = right_rot(ah[0], 2) ^ right_rot(ah[0], 13) ^ right_rot(ah[0], 22); - const uint32_t maj = (ah[0] & ah[1]) ^ (ah[0] & ah[2]) ^ (ah[1] & ah[2]); - const uint32_t temp2 = s0 + maj; - - ah[7] = ah[6]; - ah[6] = ah[5]; - ah[5] = ah[4]; - ah[4] = ah[3] + temp1; - ah[3] = ah[2]; - ah[2] = ah[1]; - ah[1] = ah[0]; - ah[0] = temp1 + temp2; - } - } - - /* Add the compressed chunk to the current hash value: */ - for (i = 0; i < 8; i++) - h[i] += ah[i]; -} - -/* - * Public functions. See header file for documentation. - */ - -void sha_256_init(struct Sha_256 *sha_256, uint8_t hash[SIZE_OF_SHA_256_HASH]) -{ - sha_256->hash = hash; - sha_256->chunk_pos = sha_256->chunk; - sha_256->space_left = SIZE_OF_SHA_256_CHUNK; - sha_256->total_len = 0; - /* - * Initialize hash values (first 32 bits of the fractional parts of the square roots of the first 8 primes - * 2..19): - */ - sha_256->h[0] = 0x6a09e667; - sha_256->h[1] = 0xbb67ae85; - sha_256->h[2] = 0x3c6ef372; - sha_256->h[3] = 0xa54ff53a; - sha_256->h[4] = 0x510e527f; - sha_256->h[5] = 0x9b05688c; - sha_256->h[6] = 0x1f83d9ab; - sha_256->h[7] = 0x5be0cd19; -} - -void sha_256_write(struct Sha_256 *sha_256, const uint8_t *data, size_t len) -{ - sha_256->total_len += len; - - const uint8_t *p = data; - - while (len > 0) { - /* - * If the input chunks have sizes that are multiples of the calculation chunk size, no copies are - * necessary. We operate directly on the input data instead. 
- */ - if (sha_256->space_left == SIZE_OF_SHA_256_CHUNK && len >= SIZE_OF_SHA_256_CHUNK) { - consume_chunk(sha_256->h, p); - len -= SIZE_OF_SHA_256_CHUNK; - p += SIZE_OF_SHA_256_CHUNK; - continue; - } - /* General case, no particular optimization. */ - const size_t consumed_len = len < sha_256->space_left ? len : sha_256->space_left; - memcpy(sha_256->chunk_pos, p, consumed_len); - sha_256->space_left -= consumed_len; - len -= consumed_len; - p += consumed_len; - if (sha_256->space_left == 0) { - consume_chunk(sha_256->h, sha_256->chunk); - sha_256->chunk_pos = sha_256->chunk; - sha_256->space_left = SIZE_OF_SHA_256_CHUNK; - } else { - sha_256->chunk_pos += consumed_len; - } - } -} - -uint8_t *sha_256_close(struct Sha_256 *sha_256) -{ - uint8_t *pos = sha_256->chunk_pos; - size_t space_left = sha_256->space_left; - uint32_t *const h = sha_256->h; - - /* - * The current chunk cannot be full. Otherwise, it would already have been consumed. I.e. there is space left for - * at least one byte. The next step in the calculation is to add a single one-bit to the data. - */ - *pos++ = 0x80; - --space_left; - - /* - * Now, the last step is to add the total data length at the end of the last chunk, and zero padding before - * that. But we do not necessarily have enough space left. If not, we pad the current chunk with zeroes, and add - * an extra chunk at the end. - */ - if (space_left < TOTAL_LEN_LEN) { - memset(pos, 0x00, space_left); - consume_chunk(h, sha_256->chunk); - pos = sha_256->chunk; - space_left = SIZE_OF_SHA_256_CHUNK; - } - const size_t left = space_left - TOTAL_LEN_LEN; - memset(pos, 0x00, left); - pos += left; - size_t len = sha_256->total_len; - pos[7] = (uint8_t)(len << 3); - len >>= 5; - int i; - for (i = 6; i >= 0; --i) { - pos[i] = (uint8_t)len; - len >>= 8; - } - consume_chunk(h, sha_256->chunk); - /* Produce the final hash value (big-endian): */ - int j; - uint8_t *const hash = sha_256->hash; - for (i = 0, j = 0; i < 8; i++) { - hash[j++] = (uint8_t)(h[i] >> 24); - hash[j++] = (uint8_t)(h[i] >> 16); - hash[j++] = (uint8_t)(h[i] >> 8); - hash[j++] = (uint8_t)h[i]; - } - return sha_256->hash; -} - -void calc_sha_256(uint8_t hash[SIZE_OF_SHA_256_HASH], const uint8_t *input, size_t len) -{ - struct Sha_256 sha_256; - sha_256_init(&sha_256, hash); - sha_256_write(&sha_256, input, len); - (void)sha_256_close(&sha_256); -} \ No newline at end of file diff --git a/src/ShaTests/customSHA256.h b/src/ShaTests/customSHA256.h deleted file mode 100644 index 961a41e..0000000 --- a/src/ShaTests/customSHA256.h +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef SHA_256_H -#define SHA_256_H - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * @brief Size of the SHA-256 sum. This times eight is 256 bits. - */ -#define SIZE_OF_SHA_256_HASH 32 - -/* - * @brief Size of the chunks used for the calculations. - * - * @note This should mostly be ignored by the user, although when using the streaming API, it has an impact for - * performance. Add chunks whose size is a multiple of this, and you will avoid a lot of superfluous copying in RAM! - */ -#define SIZE_OF_SHA_256_CHUNK 64 - -/* - * @brief The opaque SHA-256 type, that should be instantiated when using the streaming API. - * - * @note Although the details are exposed here, in order to make instantiation easy, you should refrain from directly - * accessing the fields, as they may change in the future. 
- */ -struct Sha_256 { - uint8_t *hash; - uint8_t chunk[SIZE_OF_SHA_256_CHUNK]; - uint8_t *chunk_pos; - size_t space_left; - size_t total_len; - uint32_t h[8]; -}; - -/* - * @brief The simple SHA-256 calculation function. - * @param hash Hash array, where the result is delivered. - * @param input Pointer to the data the hash shall be calculated on. - * @param len Length of the input data, in byte. - * - * @note If all of the data you are calculating the hash value on is available in a contiguous buffer in memory, this is - * the function you should use. - * - * @note If either of the passed pointers is NULL, the results are unpredictable. - */ -void calc_sha_256(uint8_t hash[SIZE_OF_SHA_256_HASH], const uint8_t *input, size_t len); - -/* - * @brief Initialize a SHA-256 streaming calculation. - * @param sha_256 A pointer to a SHA-256 structure. - * @param hash Hash array, where the result will be delivered. - * - * @note If all of the data you are calculating the hash value on is not available in a contiguous buffer in memory, this is - * where you should start. Instantiate a SHA-256 structure, for instance by simply declaring it locally, make your hash - * buffer available, and invoke this function. Once a SHA-256 hash has been calculated (see further below) a SHA-256 - * structure can be initialized again for the next calculation. - * - * @note If either of the passed pointers is NULL, the results are unpredictable. - */ -void sha_256_init(struct Sha_256 *sha_256, uint8_t hash[SIZE_OF_SHA_256_HASH]); - -/* - * @brief Stream more input data for an on-going SHA-256 calculation. - * @param sha_256 A pointer to a previously initialized SHA-256 structure. - * @param data Pointer to the data to be added to the calculation. - * @param len Length of the data to add, in byte. - * - * @note This function may be invoked an arbitrary number of times between initialization and closing, but the maximum - * data length is limited by the SHA-256 algorithm: the total number of bits (i.e. the total number of bytes times - * eight) must be representable by a 64-bit unsigned integer. While that is not a practical limitation, the results are - * unpredictable if that limit is exceeded. - * - * @note This function may be invoked on empty data (zero length), although that obviously will not add any data. - * - * @note If either of the passed pointers is NULL, the results are unpredictable. - */ -void sha_256_write(struct Sha_256 *sha_256, const uint8_t *data, size_t len); - -/* - * @brief Conclude a SHA-256 streaming calculation, making the hash value available. - * @param sha_256 A pointer to a previously initialized SHA-256 structure. - * @return Pointer to the hash array, where the result is delivered. - * - * @note After this function has been invoked, the result is available in the hash buffer that initially was provided. A - * pointer to the hash value is returned for convenience, but you should feel free to ignore it: it is simply a pointer - * to the first byte of your initially provided hash array. - * - * @note If the passed pointer is NULL, the results are unpredictable. - * - * @note Invoking this function for a calculation with no data (the writing function has never been invoked, or it only - * has been invoked with empty data) is legal. It will calculate the SHA-256 value of the empty string. 
- */ -uint8_t *sha_256_close(struct Sha_256 *sha_256); - -#ifdef __cplusplus -} -#endif - -#endif \ No newline at end of file diff --git a/src/ShaTests/nerdSHA256.cpp b/src/ShaTests/nerdSHA256.cpp new file mode 100644 index 0000000..dfa7de7 --- /dev/null +++ b/src/ShaTests/nerdSHA256.cpp @@ -0,0 +1,467 @@ +#define NDEBUG +#include +#include +#include + +//#include +#include +#include + +#include "nerdSHA256.h" +#include +#include + +#define HASH_SIZE 32 + +IRAM_ATTR static inline uint32_t rotlFixed(uint32_t x, uint32_t y) + { + return (x << y) | (x >> (sizeof(y) * 8 - y)); + } +IRAM_ATTR static inline uint32_t rotrFixed(uint32_t x, uint32_t y) + { + return (x >> y) | (x << (sizeof(y) * 8 - y)); + } +/* SHA256 math based on specification */ +#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) +#define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y))) + +#define R(x, n) (((x) & 0xFFFFFFFFU) >> (n)) + +#define S(x, n) rotrFixed(x, n) +#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22)) +#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25)) +#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3)) +#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10)) + +#define a(i) S[(0-(i)) & 7] +#define b(i) S[(1-(i)) & 7] +#define c(i) S[(2-(i)) & 7] +#define d(i) S[(3-(i)) & 7] +#define e(i) S[(4-(i)) & 7] +#define f(i) S[(5-(i)) & 7] +#define g(i) S[(6-(i)) & 7] +#define h(i) S[(7-(i)) & 7] + +#define XTRANSFORM(S, D) Transform_Sha256((S),(D)) +#define XMEMCPY(d,s,l) memcpy((d),(s),(l)) +#define XMEMSET(b,c,l) memset((b),(c),(l)) + +/* SHA256 version that keeps all data in registers */ +#define SCHED1(j) (W[j] = *((uint32_t*)&data[j*sizeof(uint32_t)])) +#define SCHED(j) ( \ + W[ j & 15] += \ + Gamma1(W[(j-2) & 15])+ \ + W[(j-7) & 15] + \ + Gamma0(W[(j-15) & 15]) \ + ) + +#define RND1(j) \ + t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED1(j); \ + t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \ + d(j) += t0; \ + h(j) = t0 + t1 +#define RNDN(j) \ + t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED(j); \ + t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \ + d(j) += t0; \ + h(j) = t0 + t1 + +//DRAM_ATTR static const uint32_t K[] = { +DRAM_ATTR static const uint32_t K[64] = { + 0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL, + 0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L, + 0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L, + 0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL, + 0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L, + 0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L, + 0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL, + 0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L, + 0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L, + 0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L, + 0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL, + 0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L, + 0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L + }; + +IRAM_ATTR static int Transform_Sha256(nerd_sha256* sha256, const uint8_t* data) +{ + uint32_t S[8], t0, t1; + int i; + uint32_t W[NERD_BLOCK_SIZE/sizeof(uint32_t)]; + + /* Copy digest to working vars */ + S[0] = sha256->digest[0]; + S[1] = sha256->digest[1]; + S[2] = sha256->digest[2]; + S[3] = sha256->digest[3]; + S[4] = sha256->digest[4]; + S[5] = sha256->digest[5]; + S[6] = sha256->digest[6]; + S[7] = sha256->digest[7]; + + i = 0; + RND1( 0); RND1( 1); RND1( 2); RND1( 3); + RND1( 4); 
RND1( 5); RND1( 6); RND1( 7); + RND1( 8); RND1( 9); RND1(10); RND1(11); + RND1(12); RND1(13); RND1(14); RND1(15); + /* 64 operations, partially loop unrolled */ + for (i = 16; i < 64; i += 16) { + RNDN( 0); RNDN( 1); RNDN( 2); RNDN( 3); + RNDN( 4); RNDN( 5); RNDN( 6); RNDN( 7); + RNDN( 8); RNDN( 9); RNDN(10); RNDN(11); + RNDN(12); RNDN(13); RNDN(14); RNDN(15); + } + + /* Add the working vars back into digest */ + sha256->digest[0] += S[0]; + sha256->digest[1] += S[1]; + sha256->digest[2] += S[2]; + sha256->digest[3] += S[3]; + sha256->digest[4] += S[4]; + sha256->digest[5] += S[5]; + sha256->digest[6] += S[6]; + sha256->digest[7] += S[7]; + + return 0; +} + +IRAM_ATTR static uint32_t ByteReverseWord32(uint32_t value){ + value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8); + return rotlFixed(value, 16U); +} + +IRAM_ATTR static void ByteReverseWords(uint32_t* out, const uint32_t* in, uint32_t byteCount) +{ + uint32_t count, i; + count = byteCount/(uint32_t)sizeof(uint32_t); + for (i = 0; i < count; i++) out[i] = ByteReverseWord32(in[i]); +} + + +IRAM_ATTR static int nerd_update(nerd_sha256* sha256, uint8_t* data, uint32_t len) +{ + int ret = 0; + uint32_t blocksLen; + uint8_t* local; + + //ShaUpdate + uint32_t tmp = sha256->loLen; + if ((sha256->loLen += len) < tmp) { + sha256->hiLen++; /* carry low to high */ + } + + local = (uint8_t*)sha256->buffer; + + /* process any remainder from previous operation */ + if (sha256->buffLen > 0) { + blocksLen = min(len, NERD_BLOCK_SIZE - sha256->buffLen); + XMEMCPY(&local[sha256->buffLen], data, blocksLen); + + sha256->buffLen += blocksLen; + data += blocksLen; + len -= blocksLen; + + if (sha256->buffLen == NERD_BLOCK_SIZE) { + + ByteReverseWords(sha256->buffer, sha256->buffer, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(sha256, (const uint8_t*)local); + + if (ret == 0) + sha256->buffLen = 0; + else + len = 0; /* error */ + } + } + + /* process blocks */ + while (len >= NERD_BLOCK_SIZE) { + uint32_t* local32 = sha256->buffer; + XMEMCPY(local32, data, NERD_BLOCK_SIZE); + + data += NERD_BLOCK_SIZE; + len -= NERD_BLOCK_SIZE; + + ByteReverseWords(local32, local32, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(sha256, (const uint8_t*)local32); + + if (ret != 0) + break; + } + /* save remainder */ + if (ret == 0 && len > 0) { + XMEMCPY(local, data, len); + sha256->buffLen = len; + } + + return ret; +} + +IRAM_ATTR static int nerd_finishSHA(nerd_sha256* sha256, uint8_t* hash){ + + int ret; + uint8_t* local; + + local = (uint8_t*)sha256->buffer; + local[sha256->buffLen++] = 0x80; // add 1 + //Padd with zeros + if (sha256->buffLen > NERD_PAD_SIZE) { + + XMEMSET(&local[sha256->buffLen], 0, NERD_BLOCK_SIZE - sha256->buffLen); + sha256->buffLen += NERD_BLOCK_SIZE - sha256->buffLen; + + ByteReverseWords(sha256->buffer, sha256->buffer, NERD_BLOCK_SIZE); + XTRANSFORM(sha256, (const uint8_t*)local); + + sha256->buffLen = 0; + } + + XMEMSET(&local[sha256->buffLen], 0, NERD_PAD_SIZE - sha256->buffLen); + + // put lengths in bits + sha256->hiLen = (sha256->loLen >> (8 * sizeof(sha256->loLen) - 3)) + (sha256->hiLen << 3); + sha256->loLen = sha256->loLen << 3; + + ByteReverseWords(sha256->buffer, sha256->buffer, NERD_BLOCK_SIZE); + + // ! length ordering dependent on digest endian type ! 
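+    // Note: the buffer was already byte-reversed to big-endian word values above,
+    // so the transform reads this final block without another swap. Copying
+    // hiLen/loLen as host-order words therefore places the 64-bit bit count
+    // directly into schedule words W[14]/W[15] (high word at byte 56).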
+ XMEMCPY(&local[NERD_PAD_SIZE], &sha256->hiLen, sizeof(uint32_t)); + XMEMCPY(&local[NERD_PAD_SIZE + sizeof(uint32_t)], &sha256->loLen, sizeof(uint32_t)); + + XTRANSFORM(sha256, (const uint8_t*)local); + + ByteReverseWords(sha256->digest, sha256->digest, NERD_DIGEST_SIZE); + + //Copy temp hash + XMEMCPY(hash, sha256->digest, NERD_DIGEST_SIZE); + + return 0; +} + +IRAM_ATTR int nerd_midstate(nerd_sha256* sha256, uint8_t* data, uint32_t len) +{ + int ret = 0; + uint32_t blocksLen; + uint8_t* local; + + //Init SHA context + XMEMSET(sha256->digest, 0, sizeof(sha256->digest)); + sha256->digest[0] = 0x6A09E667L; + sha256->digest[1] = 0xBB67AE85L; + sha256->digest[2] = 0x3C6EF372L; + sha256->digest[3] = 0xA54FF53AL; + sha256->digest[4] = 0x510E527FL; + sha256->digest[5] = 0x9B05688CL; + sha256->digest[6] = 0x1F83D9ABL; + sha256->digest[7] = 0x5BE0CD19L; + + sha256->buffLen = 0; + sha256->loLen = 0; + sha256->hiLen = 0; + //endINIT Sha contexxt + + nerd_update(sha256,data,len); + + return 0; +} + +/* +IRAM_ATTR int nerd_double_sha(nerd_sha256* midstate, uint8_t* data, uint8_t* doubleHash) +{ + nerd_sha256 sha256; + int ret = 0; + uint8_t hash[32]; + + //Copy current context + XMEMCPY(&sha256, midstate, sizeof(nerd_sha256)); + + // ------ First SHA ------ + nerd_update(&sha256,data,16); //Pending 16 bytes from 80 of blockheader + nerd_finishSHA(&sha256,hash); + + // ------ Second SHA ------ + //Init SHA context + XMEMSET(sha256.digest, 0, sizeof(sha256.digest)); + sha256.digest[0] = 0x6A09E667L; + sha256.digest[1] = 0xBB67AE85L; + sha256.digest[2] = 0x3C6EF372L; + sha256.digest[3] = 0xA54FF53AL; + sha256.digest[4] = 0x510E527FL; + sha256.digest[5] = 0x9B05688CL; + sha256.digest[6] = 0x1F83D9ABL; + sha256.digest[7] = 0x5BE0CD19L; + + sha256.buffLen = 0; + sha256.loLen = 0; + sha256.hiLen = 0; + //endINIT Sha context + nerd_update(&sha256,hash,32); + nerd_finishSHA(&sha256,doubleHash); + + return 0; +} +*/ + +IRAM_ATTR int nerd_double_sha(nerd_sha256* midstate, uint8_t* data, uint8_t* doubleHash) +{ + IRAM_DATA_ATTR nerd_sha256 sha256; + //nerd_sha256 sha256_2; + int ret = 0; + uint32_t blocksLen; + uint8_t* local; + uint8_t* local2; + uint8_t tmpHash[32]; + uint8_t* hash; + + //Copy current context + XMEMCPY(&sha256, midstate, sizeof(nerd_sha256)); + + // ----- 1rst SHA ------------ + //*********** ShaUpdate *********** + uint32_t len = 16; //Pending bytes to make the sha256 + uint32_t tmp = sha256.loLen; + if ((sha256.loLen += len) < tmp) { + sha256.hiLen++; + } + + local = (uint8_t*)sha256.buffer; + // save remainder + if (ret == 0 && len > 0) { + XMEMCPY(local, data, len); + sha256.buffLen = len; + } + //*********** end update *********** + + //*********** Init SHA_finish *********** + + local[sha256.buffLen++] = 0x80; // add 1 + + XMEMSET(&local[sha256.buffLen], 0, NERD_PAD_SIZE - sha256.buffLen); + + // put lengths in bits + sha256.hiLen = (sha256.loLen >> (8 * sizeof(sha256.loLen) - 3)) + (sha256.hiLen << 3); + sha256.loLen = sha256.loLen << 3; + + ByteReverseWords(sha256.buffer, sha256.buffer, NERD_BLOCK_SIZE); + + // ! length ordering dependent on digest endian type ! 
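+    // First pass: the message is always the 80-byte block header (64 bytes already
+    // folded into the midstate plus the 16 bytes passed in), so the 0x80 pad and
+    // the 640-bit length always fit in this single final block.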
+ XMEMCPY(&local[NERD_PAD_SIZE], &sha256.hiLen, sizeof(uint32_t)); + XMEMCPY(&local[NERD_PAD_SIZE + sizeof(uint32_t)], &sha256.loLen, sizeof(uint32_t)); + + XTRANSFORM(&sha256, (const uint8_t*)local); + + ByteReverseWords((uint32_t* )tmpHash, sha256.digest, NERD_DIGEST_SIZE); + + hash = tmpHash; + + //*********** end SHA_finish *********** + + // ----- 2nd SHA ------------ + //Init SHA context again + XMEMSET(sha256.digest, 0, sizeof(sha256.digest)); + sha256.digest[0] = 0x6A09E667L; + sha256.digest[1] = 0xBB67AE85L; + sha256.digest[2] = 0x3C6EF372L; + sha256.digest[3] = 0xA54FF53AL; + sha256.digest[4] = 0x510E527FL; + sha256.digest[5] = 0x9B05688CL; + sha256.digest[6] = 0x1F83D9ABL; + sha256.digest[7] = 0x5BE0CD19L; + + sha256.buffLen = 0; + sha256.loLen = 0; + sha256.hiLen = 0; + //endINIT Sha context + + //*********** ShaUpdate *********** + len = 32; //Current hash size to make the 2nd sha256 + tmp = sha256.loLen; + if ((sha256.loLen += len) < tmp) { + sha256.hiLen++; + } + + local2 = (uint8_t*)sha256.buffer; + + // process any remainder from previous operation + if (sha256.buffLen > 0) { + blocksLen = min(len, NERD_BLOCK_SIZE - sha256.buffLen); + XMEMCPY(&local2[sha256.buffLen], hash, blocksLen); + + sha256.buffLen += blocksLen; + hash += blocksLen; + len -= blocksLen; + + if (sha256.buffLen == NERD_BLOCK_SIZE) { + + ByteReverseWords(sha256.buffer, sha256.buffer, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(&sha256, (const uint8_t*)local2); + + if (ret == 0) + sha256.buffLen = 0; + else + len = 0; // error + } + } + + // process blocks + while (len >= NERD_BLOCK_SIZE) { + uint32_t* local32 = sha256.buffer; + XMEMCPY(local32, hash, NERD_BLOCK_SIZE); + + hash += NERD_BLOCK_SIZE; + len -= NERD_BLOCK_SIZE; + + ByteReverseWords(local32, local32, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(&sha256, (const uint8_t*)local32); + + if (ret != 0) + break; + } + // save remainder + if (ret == 0 && len > 0) { + XMEMCPY(local2, hash, len); + sha256.buffLen = len; + } + //*********** end update *********** + + //*********** Init SHA_finish *********** + + //local2 = (uint8_t*)sha256.buffer; + local2[sha256.buffLen++] = 0x80; // add 1 + //local2[33] = 0x80; // add 1 + + //Padd with zeros + + if (sha256.buffLen > NERD_PAD_SIZE) { + + XMEMSET(&local2[sha256.buffLen], 0, NERD_BLOCK_SIZE - sha256.buffLen); + sha256.buffLen += NERD_BLOCK_SIZE - sha256.buffLen; + + //ByteReverseWords(sha256_2.buffer, sha256_2.buffer, NERD_BLOCK_SIZE); + XTRANSFORM(&sha256, (const uint8_t*)local2); + + sha256.buffLen = 0; + } + + XMEMSET(&local2[sha256.buffLen], 0, NERD_PAD_SIZE - sha256.buffLen); + + // put lengths in bits + sha256.hiLen = (sha256.loLen >> (8 * sizeof(sha256.loLen) - 3)) + (sha256.hiLen << 3); + sha256.loLen = sha256.loLen << 3; + + ByteReverseWords(sha256.buffer, sha256.buffer, NERD_BLOCK_SIZE); + + // ! length ordering dependent on digest endian type ! 
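+    // Second pass: the message is the 32-byte digest of the first pass, so buffLen
+    // is 33 after the 0x80 pad (well below NERD_PAD_SIZE) and the length field is
+    // always 256 bits; the overflow branch above never triggers here.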
+ XMEMCPY(&local2[NERD_PAD_SIZE], &sha256.hiLen, sizeof(uint32_t)); + XMEMCPY(&local2[NERD_PAD_SIZE + sizeof(uint32_t)], &sha256.loLen, sizeof(uint32_t)); + + XTRANSFORM(&sha256, (const uint8_t*)local2); + + ByteReverseWords((uint32_t*)doubleHash, sha256.digest, NERD_DIGEST_SIZE); + + return 0; +} + diff --git a/src/ShaTests/nerdSHA256.h b/src/ShaTests/nerdSHA256.h new file mode 100644 index 0000000..c4b866a --- /dev/null +++ b/src/ShaTests/nerdSHA256.h @@ -0,0 +1,26 @@ +#ifndef nerdSHA256_H_ +#define nerdSHA256_H_ + +#include +#include +#include + +#define NERD_DIGEST_SIZE 32 +#define NERD_BLOCK_SIZE 64 +#define NERD_PAD_SIZE 56 + +struct nerd_sha256 { + uint32_t digest[NERD_DIGEST_SIZE / sizeof(uint32_t)]; + uint32_t buffer[NERD_BLOCK_SIZE / sizeof(uint32_t)]; + uint32_t buffLen; /* in bytes */ + uint32_t loLen; /* length in bytes */ + uint32_t hiLen; /* length in bytes */ + void* heap; +}; + +/* Calculate midstate */ +IRAM_ATTR int nerd_midstate(nerd_sha256* sha256, uint8_t* data, uint32_t len); + +IRAM_ATTR int nerd_double_sha(nerd_sha256* midstate, uint8_t* data, uint8_t* doubleHash); + +#endif /* nerdSHA256_H_ */ \ No newline at end of file diff --git a/src/mining.cpp b/src/mining.cpp index 98a4f99..456f0e8 100644 --- a/src/mining.cpp +++ b/src/mining.cpp @@ -4,6 +4,7 @@ #include #include // Graphics and font library for ILI9341 driver chip #include +//#include "ShaTests/nerdSHA256.h" #include "media/Free_Fonts.h" #include "media/images.h" #include "OpenFontRender.h" @@ -240,12 +241,14 @@ void runMiner(void * task_id) { //Prepare Premining data Sha256 midstate[32]; - unsigned char hash[32]; + //nerd_sha256 nerdMidstate; + uint8_t hash[32]; Sha256 sha256; //Calcular midstate WOLF wc_InitSha256(midstate); wc_Sha256Update(midstate, mMiner.bytearray_blockheader, 64); + //nerd_midstate(&nerdMidstate, mMiner.bytearray_blockheader, 64); /*Serial.println("Blockheader:"); @@ -285,6 +288,7 @@ void runMiner(void * task_id) { // Segundo SHA-256 wc_Sha256Update(&sha256, hash, 32); wc_Sha256Final(&sha256, hash); + //nerd_double_sha(&nerdMidstate, header64, hash); /*for (size_t i = 0; i < 32; i++) Serial.printf("%02x", hash[i]); diff --git a/src/wManager.cpp b/src/wManager.cpp index e083da5..992292b 100644 --- a/src/wManager.cpp +++ b/src/wManager.cpp @@ -19,8 +19,8 @@ bool shouldSaveConfig = false; // Variables to hold data from custom textboxes -char poolString[80] = "solo.ckpool.org"; -int portNumber = 3333; +char poolString[80] = "public-pool.airdns.org"; +int portNumber = 21496;//3333; char btcString[80] = "yourBtcAddress"; int GMTzone = 2; //Currently selected in spain diff --git a/test/TestHashPerformance/src/nerdSHA256.cpp b/test/TestHashPerformance/src/nerdSHA256.cpp new file mode 100644 index 0000000..39c8172 --- /dev/null +++ b/test/TestHashPerformance/src/nerdSHA256.cpp @@ -0,0 +1,567 @@ +#define NDEBUG +#include +#include +#include + +//#include +#include +#include + +#include "nerdSHA256.h" +#include +#include + +#define HASH_SIZE 32 + +IRAM_ATTR static inline uint32_t rotlFixed(uint32_t x, uint32_t y) + { + return (x << y) | (x >> (sizeof(y) * 8 - y)); + } +IRAM_ATTR static inline uint32_t rotrFixed(uint32_t x, uint32_t y) + { + return (x >> y) | (x << (sizeof(y) * 8 - y)); + } +/* SHA256 math based on specification */ +#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) +#define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y))) + +//#define R(x, n) (((x) & 0xFFFFFFFFU) >> (n)) + +#define S(x, n) rotrFixed(x, n) +#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22)) +#define Sigma1(x) (S(x, 6) ^ 
S(x, 11) ^ S(x, 25)) +#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3)) +#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10)) + +#define a(i) S[(0-(i)) & 7] +#define b(i) S[(1-(i)) & 7] +#define c(i) S[(2-(i)) & 7] +#define d(i) S[(3-(i)) & 7] +#define e(i) S[(4-(i)) & 7] +#define f(i) S[(5-(i)) & 7] +#define g(i) S[(6-(i)) & 7] +#define h(i) S[(7-(i)) & 7] + +#define XTRANSFORM(S, D) Transform_Sha256((S),(D)) +#define XMEMCPY(d,s,l) memcpy((d),(s),(l)) +#define XMEMSET(b,c,l) memset((b),(c),(l)) + +/* SHA256 version that keeps all data in registers */ +#define SCHED1(j) (W[j] = *((uint32_t*)&data[j*sizeof(uint32_t)])) +#define SCHED(j) ( \ + W[ j & 15] += \ + Gamma1(W[(j-2) & 15])+ \ + W[(j-7) & 15] + \ + Gamma0(W[(j-15) & 15]) \ + ) + +#define RND1(j) \ + t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED1(j); \ + t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \ + d(j) += t0; \ + h(j) = t0 + t1 +#define RNDN(j) \ + t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED(j); \ + t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \ + d(j) += t0; \ + h(j) = t0 + t1 + +#define SHR(x, n) ((x & 0xFFFFFFFF) >> n) +//#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) +#define ROTR(x, n) (SHR(x, n) | ((x) << (32 - (n)))) + +#define S0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3)) +#define S1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHR(x, 10)) + +#define S2(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) +#define S3(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) + +#define F0(x, y, z) ((x & y) | (z & (x | y))) +#define F1(x, y, z) (z ^ (x & (y ^ z))) + +#define R(t) (W[t] = S1(W[t - 2]) + W[t - 7] + S0(W[t - 15]) + W[t - 16]) + +#define P(a, b, c, d, e, f, g, h, x, K) \ + { \ + temp1 = h + S3(e) + F1(e, f, g) + K + x; \ + temp2 = S2(a) + F0(a, b, c); \ + d += temp1; \ + h = temp1 + temp2; \ + } +#define GET_UINT32_BE(b, i) \ + (((uint32_t)(b)[(i)] << 24) | ((uint32_t)(b)[(i) + 1] << 16) | ((uint32_t)(b)[(i) + 2] << 8) \ + | ((uint32_t)(b)[(i) + 3])) + +//DRAM_ATTR static const uint32_t K[] = { +DRAM_ATTR static const uint32_t K[64] = { + 0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL, + 0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L, + 0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L, + 0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL, + 0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L, + 0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L, + 0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL, + 0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L, + 0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L, + 0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L, + 0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL, + 0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L, + 0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L + }; + +/* +IRAM_ATTR static int Transform_Sha256(nerd_sha256* sha256, const uint8_t* buf_ptr) +{ + + uint32_t A[8] = {0 }; + + uint32_t temp1, temp2, W[64]; + int i=0; + + for (i = 0; i < 8; i++) { + A[i] = sha256->digest[i]; + } + + W[0] = GET_UINT32_BE(buf_ptr, 0); + W[1] = GET_UINT32_BE(buf_ptr, 4); + W[2] = GET_UINT32_BE(buf_ptr, 8); + W[3] = GET_UINT32_BE(buf_ptr, 12); + W[4] = GET_UINT32_BE(buf_ptr, 16); + W[5] = GET_UINT32_BE(buf_ptr, 20); + W[6] = GET_UINT32_BE(buf_ptr, 24); + W[7] = GET_UINT32_BE(buf_ptr, 28); + W[8] = GET_UINT32_BE(buf_ptr, 32); + W[9] = GET_UINT32_BE(buf_ptr, 36); + W[10] = GET_UINT32_BE(buf_ptr, 
40); + W[11] = GET_UINT32_BE(buf_ptr, 44); + W[12] = GET_UINT32_BE(buf_ptr, 48); + W[13] = GET_UINT32_BE(buf_ptr, 52); + W[14] = GET_UINT32_BE(buf_ptr, 56); + W[15] = GET_UINT32_BE(buf_ptr, 60); + + for (i = 0; i < 16; i += 8) { + P(A[0], A[1], A[2], A[3], A[4], + A[5], A[6], A[7], W[i+0], K[i+0]); + P(A[7], A[0], A[1], A[2], A[3], + A[4], A[5], A[6], W[i+1], K[i+1]); + P(A[6], A[7], A[0], A[1], A[2], + A[3], A[4], A[5], W[i+2], K[i+2]); + P(A[5], A[6], A[7], A[0], A[1], + A[2], A[3], A[4], W[i+3], K[i+3]); + P(A[4], A[5], A[6], A[7], A[0], + A[1], A[2], A[3], W[i+4], K[i+4]); + P(A[3], A[4], A[5], A[6], A[7], + A[0], A[1], A[2], W[i+5], K[i+5]); + P(A[2], A[3], A[4], A[5], A[6], + A[7], A[0], A[1], W[i+6], K[i+6]); + P(A[1], A[2], A[3], A[4], A[5], + A[6], A[7], A[0], W[i+7], K[i+7]); + } + + for (i = 16; i < 64; i += 8) { + P(A[0], A[1], A[2], A[3], A[4], + A[5], A[6], A[7], R(i+0), K[i+0]); + P(A[7], A[0], A[1], A[2], A[3], + A[4], A[5], A[6], R(i+1), K[i+1]); + P(A[6], A[7], A[0], A[1], A[2], + A[3], A[4], A[5], R(i+2), K[i+2]); + P(A[5], A[6], A[7], A[0], A[1], + A[2], A[3], A[4], R(i+3), K[i+3]); + P(A[4], A[5], A[6], A[7], A[0], + A[1], A[2], A[3], R(i+4), K[i+4]); + P(A[3], A[4], A[5], A[6], A[7], + A[0], A[1], A[2], R(i+5), K[i+5]); + P(A[2], A[3], A[4], A[5], A[6], + A[7], A[0], A[1], R(i+6), K[i+6]); + P(A[1], A[2], A[3], A[4], A[5], + A[6], A[7], A[0], R(i+7), K[i+7]); + } + + for (i = 0; i < 8; i++) { + sha256->digest[i] += A[i]; + } +} +*/ + +IRAM_ATTR static int Transform_Sha256(nerd_sha256* sha256, const uint8_t* data) +{ + uint32_t S[8], t0, t1; + int i; + uint32_t W[NERD_BLOCK_SIZE/sizeof(uint32_t)]; + + // Copy digest to working vars + S[0] = sha256->digest[0]; + S[1] = sha256->digest[1]; + S[2] = sha256->digest[2]; + S[3] = sha256->digest[3]; + S[4] = sha256->digest[4]; + S[5] = sha256->digest[5]; + S[6] = sha256->digest[6]; + S[7] = sha256->digest[7]; + + i = 0; + RND1( 0); RND1( 1); RND1( 2); RND1( 3); + RND1( 4); RND1( 5); RND1( 6); RND1( 7); + RND1( 8); RND1( 9); RND1(10); RND1(11); + RND1(12); RND1(13); RND1(14); RND1(15); + // 64 operations, partially loop unrolled + for (i = 16; i < 64; i += 16) { + RNDN( 0); RNDN( 1); RNDN( 2); RNDN( 3); + RNDN( 4); RNDN( 5); RNDN( 6); RNDN( 7); + RNDN( 8); RNDN( 9); RNDN(10); RNDN(11); + RNDN(12); RNDN(13); RNDN(14); RNDN(15); + } + + // Add the working vars back into digest + sha256->digest[0] += S[0]; + sha256->digest[1] += S[1]; + sha256->digest[2] += S[2]; + sha256->digest[3] += S[3]; + sha256->digest[4] += S[4]; + sha256->digest[5] += S[5]; + sha256->digest[6] += S[6]; + sha256->digest[7] += S[7]; + + return 0; +} + +IRAM_ATTR static uint32_t ByteReverseWord32(uint32_t value){ + value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8); + return rotlFixed(value, 16U); +} + +IRAM_ATTR static void ByteReverseWords(uint32_t* out, const uint32_t* in, uint32_t byteCount) +{ + uint32_t count, i; + count = byteCount/(uint32_t)sizeof(uint32_t); + for (i = 0; i < count; i++) out[i] = ByteReverseWord32(in[i]); +} + + +IRAM_ATTR static int nerd_update(nerd_sha256* sha256, uint8_t* data, uint32_t len) +{ + int ret = 0; + uint32_t blocksLen; + uint8_t* local; + + //ShaUpdate + uint32_t tmp = sha256->loLen; + if ((sha256->loLen += len) < tmp) { + sha256->hiLen++; /* carry low to high */ + } + + local = (uint8_t*)sha256->buffer; + + /* process any remainder from previous operation */ + if (sha256->buffLen > 0) { + blocksLen = min(len, NERD_BLOCK_SIZE - sha256->buffLen); + XMEMCPY(&local[sha256->buffLen], data, blocksLen); + + 
sha256->buffLen += blocksLen; + data += blocksLen; + len -= blocksLen; + + if (sha256->buffLen == NERD_BLOCK_SIZE) { + + ByteReverseWords(sha256->buffer, sha256->buffer, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(sha256, (const uint8_t*)local); + + if (ret == 0) + sha256->buffLen = 0; + else + len = 0; /* error */ + } + } + + /* process blocks */ + while (len >= NERD_BLOCK_SIZE) { + uint32_t* local32 = sha256->buffer; + XMEMCPY(local32, data, NERD_BLOCK_SIZE); + + data += NERD_BLOCK_SIZE; + len -= NERD_BLOCK_SIZE; + + ByteReverseWords(local32, local32, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(sha256, (const uint8_t*)local32); + + if (ret != 0) + break; + } + /* save remainder */ + if (ret == 0 && len > 0) { + XMEMCPY(local, data, len); + sha256->buffLen = len; + } + + return ret; +} + +IRAM_ATTR static int nerd_finishSHA(nerd_sha256* sha256, uint8_t* hash){ + + int ret; + uint8_t* local; + + local = (uint8_t*)sha256->buffer; + local[sha256->buffLen++] = 0x80; // add 1 + //Padd with zeros + if (sha256->buffLen > NERD_PAD_SIZE) { + + XMEMSET(&local[sha256->buffLen], 0, NERD_BLOCK_SIZE - sha256->buffLen); + sha256->buffLen += NERD_BLOCK_SIZE - sha256->buffLen; + + ByteReverseWords(sha256->buffer, sha256->buffer, NERD_BLOCK_SIZE); + XTRANSFORM(sha256, (const uint8_t*)local); + + sha256->buffLen = 0; + } + + XMEMSET(&local[sha256->buffLen], 0, NERD_PAD_SIZE - sha256->buffLen); + + // put lengths in bits + sha256->hiLen = (sha256->loLen >> (8 * sizeof(sha256->loLen) - 3)) + (sha256->hiLen << 3); + sha256->loLen = sha256->loLen << 3; + + ByteReverseWords(sha256->buffer, sha256->buffer, NERD_BLOCK_SIZE); + + // ! length ordering dependent on digest endian type ! + XMEMCPY(&local[NERD_PAD_SIZE], &sha256->hiLen, sizeof(uint32_t)); + XMEMCPY(&local[NERD_PAD_SIZE + sizeof(uint32_t)], &sha256->loLen, sizeof(uint32_t)); + + XTRANSFORM(sha256, (const uint8_t*)local); + + ByteReverseWords(sha256->digest, sha256->digest, NERD_DIGEST_SIZE); + + //Copy temp hash + XMEMCPY(hash, sha256->digest, NERD_DIGEST_SIZE); + + return 0; +} + +IRAM_ATTR int nerd_midstate(nerd_sha256* sha256, uint8_t* data, uint32_t len) +{ + int ret = 0; + uint32_t blocksLen; + uint8_t* local; + + //Init SHA context + XMEMSET(sha256->digest, 0, sizeof(sha256->digest)); + sha256->digest[0] = 0x6A09E667L; + sha256->digest[1] = 0xBB67AE85L; + sha256->digest[2] = 0x3C6EF372L; + sha256->digest[3] = 0xA54FF53AL; + sha256->digest[4] = 0x510E527FL; + sha256->digest[5] = 0x9B05688CL; + sha256->digest[6] = 0x1F83D9ABL; + sha256->digest[7] = 0x5BE0CD19L; + + sha256->buffLen = 0; + sha256->loLen = 0; + sha256->hiLen = 0; + //endINIT Sha contexxt + + nerd_update(sha256,data,len); + + return 0; +} + +/* +IRAM_ATTR int nerd_double_sha(nerd_sha256* midstate, uint8_t* data, uint8_t* doubleHash) +{ + nerd_sha256 sha256; + int ret = 0; + uint8_t hash[32]; + + //Copy current context + XMEMCPY(&sha256, midstate, sizeof(nerd_sha256)); + + // ------ First SHA ------ + nerd_update(&sha256,data,16); //Pending 16 bytes from 80 of blockheader + nerd_finishSHA(&sha256,hash); + + // ------ Second SHA ------ + //Init SHA context + XMEMSET(sha256.digest, 0, sizeof(sha256.digest)); + sha256.digest[0] = 0x6A09E667L; + sha256.digest[1] = 0xBB67AE85L; + sha256.digest[2] = 0x3C6EF372L; + sha256.digest[3] = 0xA54FF53AL; + sha256.digest[4] = 0x510E527FL; + sha256.digest[5] = 0x9B05688CL; + sha256.digest[6] = 0x1F83D9ABL; + sha256.digest[7] = 0x5BE0CD19L; + + sha256.buffLen = 0; + sha256.loLen = 0; + sha256.hiLen = 0; + //endINIT Sha context + nerd_update(&sha256,hash,32); + 
nerd_finishSHA(&sha256,doubleHash); + + return 0; +} +*/ + +IRAM_ATTR int nerd_double_sha(nerd_sha256* midstate, uint8_t* data, uint8_t* doubleHash) +{ + IRAM_DATA_ATTR nerd_sha256 sha256; + //nerd_sha256 sha256_2; + int ret = 0; + uint32_t blocksLen; + uint8_t* local; + uint8_t* local2; + uint8_t tmpHash[32]; + uint8_t* hash; + + //Copy current context + XMEMCPY(&sha256, midstate, sizeof(nerd_sha256)); + + // ----- 1rst SHA ------------ + //*********** ShaUpdate *********** + uint32_t len = 16; //Pending bytes to make the sha256 + uint32_t tmp = sha256.loLen; + if ((sha256.loLen += len) < tmp) { + sha256.hiLen++; + } + + local = (uint8_t*)sha256.buffer; + // save remainder + if (ret == 0 && len > 0) { + XMEMCPY(local, data, len); + sha256.buffLen = len; + } + //*********** end update *********** + + //*********** Init SHA_finish *********** + + local[sha256.buffLen++] = 0x80; // add 1 + + XMEMSET(&local[sha256.buffLen], 0, NERD_PAD_SIZE - sha256.buffLen); + + // put lengths in bits + sha256.hiLen = (sha256.loLen >> (8 * sizeof(sha256.loLen) - 3)) + (sha256.hiLen << 3); + sha256.loLen = sha256.loLen << 3; + + ByteReverseWords(sha256.buffer, sha256.buffer, NERD_BLOCK_SIZE); + + // ! length ordering dependent on digest endian type ! + XMEMCPY(&local[NERD_PAD_SIZE], &sha256.hiLen, sizeof(uint32_t)); + XMEMCPY(&local[NERD_PAD_SIZE + sizeof(uint32_t)], &sha256.loLen, sizeof(uint32_t)); + + XTRANSFORM(&sha256, (const uint8_t*)local); + + ByteReverseWords((uint32_t* )tmpHash, sha256.digest, NERD_DIGEST_SIZE); + + hash = tmpHash; + + //*********** end SHA_finish *********** + + // ----- 2nd SHA ------------ + //Init SHA context again + XMEMSET(sha256.digest, 0, sizeof(sha256.digest)); + sha256.digest[0] = 0x6A09E667L; + sha256.digest[1] = 0xBB67AE85L; + sha256.digest[2] = 0x3C6EF372L; + sha256.digest[3] = 0xA54FF53AL; + sha256.digest[4] = 0x510E527FL; + sha256.digest[5] = 0x9B05688CL; + sha256.digest[6] = 0x1F83D9ABL; + sha256.digest[7] = 0x5BE0CD19L; + + sha256.buffLen = 0; + sha256.loLen = 0; + sha256.hiLen = 0; + //endINIT Sha context + + //*********** ShaUpdate *********** + len = 32; //Current hash size to make the 2nd sha256 + tmp = sha256.loLen; + if ((sha256.loLen += len) < tmp) { + sha256.hiLen++; + } + + local2 = (uint8_t*)sha256.buffer; + + // process any remainder from previous operation + if (sha256.buffLen > 0) { + blocksLen = min(len, NERD_BLOCK_SIZE - sha256.buffLen); + XMEMCPY(&local2[sha256.buffLen], hash, blocksLen); + + sha256.buffLen += blocksLen; + hash += blocksLen; + len -= blocksLen; + + if (sha256.buffLen == NERD_BLOCK_SIZE) { + + ByteReverseWords(sha256.buffer, sha256.buffer, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(&sha256, (const uint8_t*)local2); + + if (ret == 0) + sha256.buffLen = 0; + else + len = 0; // error + } + } + + // process blocks + while (len >= NERD_BLOCK_SIZE) { + uint32_t* local32 = sha256.buffer; + XMEMCPY(local32, hash, NERD_BLOCK_SIZE); + + hash += NERD_BLOCK_SIZE; + len -= NERD_BLOCK_SIZE; + + ByteReverseWords(local32, local32, NERD_BLOCK_SIZE); + + ret = XTRANSFORM(&sha256, (const uint8_t*)local32); + + if (ret != 0) + break; + } + // save remainder + if (ret == 0 && len > 0) { + XMEMCPY(local2, hash, len); + sha256.buffLen = len; + } + //*********** end update *********** + + //*********** Init SHA_finish *********** + + //local2 = (uint8_t*)sha256.buffer; + local2[sha256.buffLen++] = 0x80; // add 1 + //local2[33] = 0x80; // add 1 + + //Padd with zeros + + if (sha256.buffLen > NERD_PAD_SIZE) { + + XMEMSET(&local2[sha256.buffLen], 0, NERD_BLOCK_SIZE 
- sha256.buffLen); + sha256.buffLen += NERD_BLOCK_SIZE - sha256.buffLen; + + //ByteReverseWords(sha256_2.buffer, sha256_2.buffer, NERD_BLOCK_SIZE); + XTRANSFORM(&sha256, (const uint8_t*)local2); + + sha256.buffLen = 0; + } + + XMEMSET(&local2[sha256.buffLen], 0, NERD_PAD_SIZE - sha256.buffLen); + + // put lengths in bits + sha256.hiLen = (sha256.loLen >> (8 * sizeof(sha256.loLen) - 3)) + (sha256.hiLen << 3); + sha256.loLen = sha256.loLen << 3; + + ByteReverseWords(sha256.buffer, sha256.buffer, NERD_BLOCK_SIZE); + + // ! length ordering dependent on digest endian type ! + XMEMCPY(&local2[NERD_PAD_SIZE], &sha256.hiLen, sizeof(uint32_t)); + XMEMCPY(&local2[NERD_PAD_SIZE + sizeof(uint32_t)], &sha256.loLen, sizeof(uint32_t)); + + XTRANSFORM(&sha256, (const uint8_t*)local2); + + ByteReverseWords((uint32_t*)doubleHash, sha256.digest, NERD_DIGEST_SIZE); + + return 0; +} + diff --git a/test/TestHashPerformance/src/nerdSHA256.h b/test/TestHashPerformance/src/nerdSHA256.h new file mode 100644 index 0000000..c4b866a --- /dev/null +++ b/test/TestHashPerformance/src/nerdSHA256.h @@ -0,0 +1,26 @@ +#ifndef nerdSHA256_H_ +#define nerdSHA256_H_ + +#include +#include +#include + +#define NERD_DIGEST_SIZE 32 +#define NERD_BLOCK_SIZE 64 +#define NERD_PAD_SIZE 56 + +struct nerd_sha256 { + uint32_t digest[NERD_DIGEST_SIZE / sizeof(uint32_t)]; + uint32_t buffer[NERD_BLOCK_SIZE / sizeof(uint32_t)]; + uint32_t buffLen; /* in bytes */ + uint32_t loLen; /* length in bytes */ + uint32_t hiLen; /* length in bytes */ + void* heap; +}; + +/* Calculate midstate */ +IRAM_ATTR int nerd_midstate(nerd_sha256* sha256, uint8_t* data, uint32_t len); + +IRAM_ATTR int nerd_double_sha(nerd_sha256* midstate, uint8_t* data, uint8_t* doubleHash); + +#endif /* nerdSHA256_H_ */ \ No newline at end of file diff --git a/test/TestHashPerformance/src/testShaPerformance.cpp b/test/TestHashPerformance/src/testShaPerformance.cpp index bae45bd..65e0fda 100644 --- a/test/TestHashPerformance/src/testShaPerformance.cpp +++ b/test/TestHashPerformance/src/testShaPerformance.cpp @@ -5,6 +5,7 @@ #include "jadeSHA256.h" #include "customSHA256.h" +#include "nerdSHA256.h" #include "mbedtls/md.h" #include "mbedtls/sha256.h" #include @@ -69,11 +70,12 @@ void loop() { Sha256 sha256; uint8_t hash2[32]; wc_InitSha256(&midstate); - wc_Sha256Update(&midstate, blockheader, 64); - Serial.println("Wolf midstate:"); + wc_Sha256Update(&midstate, blockheader, 64); + Serial.print("Wolf midstate: "); for (size_t i = 0; i < 8; i++) Serial.printf("%02x", midstate.digest[i]); Serial.println(""); + // Mining starts here //Primer sha startT = micros(); @@ -144,5 +146,23 @@ void loop() { for (size_t i = 0; i < 32; i++) Serial.printf("%02x", midstate_cached.buffer[i]); Serial.println(""); + + //Test nerdSHA + nerd_sha256 nerdMidstate; + uint8_t nerdHash[32]; + nerd_midstate(&nerdMidstate, blockheader, 64); + Serial.print("Nerd midstate: "); + for (size_t i = 0; i < 8; i++) + Serial.printf("%02x", nerdMidstate.digest[i]); + Serial.println(""); -} \ No newline at end of file + //Mining starts here + startT = micros(); + nerd_double_sha(&nerdMidstate, blockheader+64,nerdHash); + expired = micros() - startT; + Serial.println("Nerd double SHA[" + String(expired) + "us]:"); + for (size_t i = 0; i < 32; i++) + Serial.printf("%02x", nerdHash[i]); + Serial.println(""); + +} \ No newline at end of file
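
The new hasher is still switched off in `mining.cpp` (the `nerd_midstate` / `nerd_double_sha` calls sit commented out next to the wolfSSL ones), so the intended call pattern is only implied by this diff. The sketch below shows that pattern under stated assumptions: `mineJob`, the 80-byte `blockheader` buffer and `meetsTarget` are hypothetical placeholders for the surrounding miner code in `runMiner`, not symbols added by this PR.

```cpp
#include <string.h>
#include <stdint.h>

#include "ShaTests/nerdSHA256.h"

// Placeholder share check: accept digests whose last four bytes are zero
// (roughly difficulty 2^32); the real firmware compares against the pool target.
static bool meetsTarget(const uint8_t hash[32]) {
    return hash[28] == 0 && hash[29] == 0 && hash[30] == 0 && hash[31] == 0;
}

static void mineJob(uint8_t blockheader[80]) {
    nerd_sha256 midstate;
    uint8_t hash[32];

    // The first 64 header bytes never change per nonce, so their
    // compression rounds are done once per job.
    nerd_midstate(&midstate, blockheader, 64);

    for (uint32_t nonce = 0; nonce < 0xFFFFFFFFU; nonce++) {
        // The nonce occupies the last 4 header bytes (offset 76); the ESP32 is
        // little-endian, matching the header's serialized nonce byte order.
        memcpy(blockheader + 76, &nonce, sizeof(nonce));

        // nerd_double_sha copies the midstate context internally, so the same
        // midstate can be reused for every nonce of the job.
        nerd_double_sha(&midstate, blockheader + 64, hash);

        if (meetsTarget(hash)) {
            // submit the share to the pool here
            break;
        }
    }
}
```

The benchmark added to `test/TestHashPerformance/src/testShaPerformance.cpp` already prints the wolfSSL and nerdSHA256 midstates and double-SHA timings side by side, which is the natural place to confirm both paths produce identical digests before the commented-out calls in `runMiner` are enabled.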