Change some library parameters to work with the website httpbin.org
Fork of MiniTLS-GPL by
Diff: crypto/crypto_macros.h
- Revision: 4:cbaf466d717d
- Parent: 2:527a66d0a1a9
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/crypto/crypto_macros.h	Tue Jun 10 14:23:09 2014 +0000
@@ -0,0 +1,261 @@
+/*
+MiniTLS - A super trimmed down TLS/SSL Library for embedded devices
+Author: Donatien Garnier
+Copyright (C) 2013-2014 AppNearMe Ltd
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+*//**
+ * \file crypto_macros.h
+ * \copyright Copyright (c) AppNearMe Ltd 2013
+ * \author Donatien Garnier
+ */
+
+#ifndef CRYPTO_MACROS_H_
+#define CRYPTO_MACROS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "core/fwk.h"
+#include "inc/minitls_config.h"
+
+#define XMEMCPY memcpy
+
+#define CONST64(n) n ## ULL
+typedef uint64_t ulong64;
+typedef uint32_t ulong32;
+
+/* ---- HELPER MACROS ---- */
+#ifdef ENDIAN_NEUTRAL
+
+#define STORE32L(x, y) \
+  { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+    (y)[1] = (unsigned char)(((x)>>8)&255);  (y)[0] = (unsigned char)((x)&255); }
+
+#define LOAD32L(x, y) \
+  { x = ((unsigned long)((y)[3] & 255)<<24) | \
+        ((unsigned long)((y)[2] & 255)<<16) | \
+        ((unsigned long)((y)[1] & 255)<<8)  | \
+        ((unsigned long)((y)[0] & 255)); }
+
+#define STORE64L(x, y) \
+  { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
+    (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255); \
+    (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+    (y)[1] = (unsigned char)(((x)>>8)&255);  (y)[0] = (unsigned char)((x)&255); }
+
+#define LOAD64L(x, y) \
+  { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)| \
+        (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32)| \
+        (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16)| \
+        (((ulong64)((y)[1] & 255))<<8) |(((ulong64)((y)[0] & 255))); }
+
+#define STORE32H(x, y) \
+  { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
+    (y)[2] = (unsigned char)(((x)>>8)&255);  (y)[3] = (unsigned char)((x)&255); }
+
+#define LOAD32H(x, y) \
+  { x = ((unsigned long)((y)[0] & 255)<<24) | \
+        ((unsigned long)((y)[1] & 255)<<16) | \
+        ((unsigned long)((y)[2] & 255)<<8)  | \
+        ((unsigned long)((y)[3] & 255)); }
+
+#define STORE64H(x, y) \
+  { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
+    (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255); \
+    (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255); \
+    (y)[6] = (unsigned char)(((x)>>8)&255);  (y)[7] = (unsigned char)((x)&255); }
+
+#define LOAD64H(x, y) \
+  { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
+        (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32) | \
+        (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16) | \
+        (((ulong64)((y)[6] & 255))<<8) |(((ulong64)((y)[7] & 255))); }
+
+#endif /* ENDIAN_NEUTRAL */
+
+#ifdef ENDIAN_LITTLE
+
+#define STORE32H(x, y) \
+  { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
+    (y)[2] = (unsigned char)(((x)>>8)&255);  (y)[3] = (unsigned char)((x)&255); }
+
+#define LOAD32H(x, y) \
+  { x = ((unsigned long)((y)[0] & 255)<<24) | \
+        ((unsigned long)((y)[1] & 255)<<16) | \
+        ((unsigned long)((y)[2] & 255)<<8)  | \
+        ((unsigned long)((y)[3] & 255)); }
+
+#define STORE64H(x, y) \
+  { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
+    (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255); \
+    (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255); \
+    (y)[6] = (unsigned char)(((x)>>8)&255);  (y)[7] = (unsigned char)((x)&255); }
+
+#define LOAD64H(x, y) \
+  { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
+        (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32) | \
+        (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16) | \
+        (((ulong64)((y)[6] & 255))<<8) |(((ulong64)((y)[7] & 255))); }
+
+
+#ifdef ENDIAN_32BITWORD
+
+#define STORE32L(x, y) \
+  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+
+#define LOAD32L(x, y) \
+  XMEMCPY(&(x), y, 4);
+
+#define STORE64L(x, y) \
+  { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
+    (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255); \
+    (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+    (y)[1] = (unsigned char)(((x)>>8)&255);  (y)[0] = (unsigned char)((x)&255); }
+
+#define LOAD64L(x, y) \
+  { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)| \
+        (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32)| \
+        (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16)| \
+        (((ulong64)((y)[1] & 255))<<8) |(((ulong64)((y)[0] & 255))); }
+
+#else /* 64-bit words then */
+
+#define STORE32L(x, y) \
+  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+
+#define LOAD32L(x, y) \
+  { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; }
+
+#define STORE64L(x, y) \
+  { ulong64 __t = (x); XMEMCPY(y, &__t, 8); }
+
+#define LOAD64L(x, y) \
+  { XMEMCPY(&(x), y, 8); }
+
+#endif /* ENDIAN_64BITWORD */
+
+#endif /* ENDIAN_LITTLE */
+
+#ifdef ENDIAN_BIG
+#define STORE32L(x, y) \
+  { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+    (y)[1] = (unsigned char)(((x)>>8)&255);  (y)[0] = (unsigned char)((x)&255); }
+
+#define LOAD32L(x, y) \
+  { x = ((unsigned long)((y)[3] & 255)<<24) | \
+        ((unsigned long)((y)[2] & 255)<<16) | \
+        ((unsigned long)((y)[1] & 255)<<8)  | \
+        ((unsigned long)((y)[0] & 255)); }
+
+#define STORE64L(x, y) \
+  { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
+    (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255); \
+    (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+    (y)[1] = (unsigned char)(((x)>>8)&255);  (y)[0] = (unsigned char)((x)&255); }
+
+#define LOAD64L(x, y) \
+  { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48) | \
+        (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32) | \
+        (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16) | \
+        (((ulong64)((y)[1] & 255))<<8) |(((ulong64)((y)[0] & 255))); }
+
+#ifdef ENDIAN_32BITWORD
+
+#define STORE32H(x, y) \
+  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+
+#define LOAD32H(x, y) \
+  XMEMCPY(&(x), y, 4);
+
+#define STORE64H(x, y) \
+  { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
+    (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255); \
+    (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255); \
+    (y)[6] = (unsigned char)(((x)>>8)&255);  (y)[7] = (unsigned char)((x)&255); }
+
+#define LOAD64H(x, y) \
+  { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48)| \
+        (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32)| \
+        (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16)| \
+        (((ulong64)((y)[6] & 255))<<8)| (((ulong64)((y)[7] & 255))); }
+
+#else /* 64-bit words then */
+
+#define STORE32H(x, y) \
+  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+
+#define LOAD32H(x, y) \
+  { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; }
+
+#define STORE64H(x, y) \
+  { ulong64 __t = (x); XMEMCPY(y, &__t, 8); }
+
+#define LOAD64H(x, y) \
+  { XMEMCPY(&(x), y, 8); }
+
+#endif /* ENDIAN_64BITWORD */
+#endif /* ENDIAN_BIG */
+
+#define BSWAP(x)  ( ((x>>24)&0x000000FFUL) | ((x<<24)&0xFF000000UL) | \
+                    ((x>>8)&0x0000FF00UL)  | ((x<<8)&0x00FF0000UL) )
+
+
+/* 32-bit Rotates */
+//#define ROLc ROL
+//#define RORc ROR
+
+/* rotates the hard way */
+#define ROL(x, y)  ( (((unsigned long)(x)<<(unsigned long)((y)&31)) | (((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+#define ROR(x, y)  ( ((((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)((y)&31)) | ((unsigned long)(x)<<(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+#define ROLc(x, y) ( (((unsigned long)(x)<<(unsigned long)((y)&31)) | (((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+#define RORc(x, y) ( ((((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)((y)&31)) | ((unsigned long)(x)<<(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+
+//#define ROL64c ROL64
+//#define ROR64c ROR64
+
+#define ROL64(x, y) \
+    ( (((x)<<((ulong64)(y)&63)) | \
+      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)64-((y)&63)))) & CONST64(0xFFFFFFFFFFFFFFFF))
+
+#define ROR64(x, y) \
+    ( ((((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)(y)&CONST64(63))) | \
+      ((x)<<((ulong64)(64-((y)&CONST64(63)))))) & CONST64(0xFFFFFFFFFFFFFFFF))
+
+#define ROL64c(x, y) \
+    ( (((x)<<((ulong64)(y)&63)) | \
+      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)64-((y)&63)))) & CONST64(0xFFFFFFFFFFFFFFFF))
+
+#define ROR64c(x, y) \
+    ( ((((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)(y)&CONST64(63))) | \
+      ((x)<<((ulong64)(64-((y)&CONST64(63)))))) & CONST64(0xFFFFFFFFFFFFFFFF))
+
+#ifndef MAX
+   #define MAX(x, y) ( ((x)>(y))?(x):(y) )
+#endif
+
+#ifndef MIN
+   #define MIN(x, y) ( ((x)<(y))?(x):(y) )
+#endif
+
+/* extract a byte portably */
+#define byte(x, n) (((x) >> (8 * (n))) & 255)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CRYPTO_MACROS_H_ */
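
The macros above serialize words in a fixed byte order regardless of the host CPU's endianness, which is what the hash and cipher code in the library relies on. As a minimal sketch (not part of the library), the following standalone program copies the STORE32H/LOAD32H definitions from the ENDIAN_NEUTRAL branch verbatim and round-trips a 32-bit value through a byte buffer; the buffer always holds the big-endian encoding and the loaded value matches the original.

/* Standalone illustration only; macro bodies copied from crypto_macros.h
 * (ENDIAN_NEUTRAL branch), not a new API of the library. */
#include <stdio.h>

#define STORE32H(x, y) \
  { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
    (y)[2] = (unsigned char)(((x)>>8)&255);  (y)[3] = (unsigned char)((x)&255); }

#define LOAD32H(x, y) \
  { x = ((unsigned long)((y)[0] & 255)<<24) | \
        ((unsigned long)((y)[1] & 255)<<16) | \
        ((unsigned long)((y)[2] & 255)<<8)  | \
        ((unsigned long)((y)[3] & 255)); }

int main(void)
{
    unsigned long value = 0xAABBCCDDUL;  /* word to serialize */
    unsigned char buf[4];                /* wire-format buffer */
    unsigned long out = 0;

    STORE32H(value, buf);   /* buf = { 0xAA, 0xBB, 0xCC, 0xDD } on any host */
    LOAD32H(out, buf);      /* out == value again */

    printf("%02X %02X %02X %02X -> %08lX\n",
           buf[0], buf[1], buf[2], buf[3], out);
    return 0;
}

The same round-trip property holds for the 64-bit and little-endian variants; the ENDIAN_LITTLE/ENDIAN_BIG branches simply replace the portable shift-and-mask forms with direct XMEMCPY copies where the host byte order already matches the target layout.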