Sending IKS01A1 temperature sensor readings as LoRaWAN port-5 uplinks

Dependencies:   X_NUCLEO_IKS01A1 mbed LoRaWAN-lib SX1276Lib

Fork of LoRaWAN-demo-76 by Semtech
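
The aes.cpp shown below is the byte-oriented AES implementation (Brian Gladman's code) that LoRaWAN-lib relies on for its crypto routines. As a quick orientation, here is a minimal usage sketch of the pre-keyed interface; it is illustrative only and assumes AES_ENC_PREKEYED is enabled in aes.h, which also supplies aes_context, N_BLOCK and the prototypes seen in the listing.

/* Illustrative sketch -- not part of aes.cpp.  Assumes AES_ENC_PREKEYED is
   enabled in aes.h and a 128-bit (16-byte) key. */
#include "aes.h"

void encrypt_one_block_example( void )
{
    static const uint8_t key[16] = { 0 };   /* replace with a real key      */
    uint8_t in[N_BLOCK]  = { 0 };           /* one 16-byte plaintext block  */
    uint8_t out[N_BLOCK];
    aes_context ctx[1];

    if( aes_set_key( key, 16, ctx ) == 0 )  /* expand the key schedule once */
    {
        aes_encrypt( in, out, ctx );        /* ECB-encrypt a single block   */
    }
}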


aes.cpp

00001 /*
00002  ---------------------------------------------------------------------------
00003  Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
00004 
00005  LICENSE TERMS
00006 
00007  The redistribution and use of this software (with or without changes)
00008  is allowed without the payment of fees or royalties provided that:
00009 
00010   1. source code distributions include the above copyright notice, this
00011      list of conditions and the following disclaimer;
00012 
00013   2. binary distributions include the above copyright notice, this list
00014      of conditions and the following disclaimer in their documentation;
00015 
00016   3. the name of the copyright holder is not used to endorse products
00017      built using this software without specific written permission.
00018 
00019  DISCLAIMER
00020 
00021  This software is provided 'as is' with no explicit or implied warranties
00022  in respect of its properties, including, but not limited to, correctness
00023  and/or fitness for purpose.
00024  ---------------------------------------------------------------------------
00025  Issue 09/09/2006
00026 
00027  This is an AES implementation that uses only 8-bit byte operations on the
00028  cipher state (there are options to use 32-bit types if available).
00029 
00030  The combination of mix columns and byte substitution used here is based on
00031  that developed by Karl Malbrain. His contribution is acknowledged.
00032  */
00033 
00034 /* define if you have a fast memcpy function on your system */
00035 #if 0
00036 #  define HAVE_MEMCPY
00037 #  include <string.h>
00038 #  if defined( _MSC_VER )
00039 #    include <intrin.h>
00040 #    pragma intrinsic( memcpy )
00041 #  endif
00042 #endif
00043 
00044 
00045 #include <stdlib.h>
00046 #include <stdint.h>
00047 
00048 /* define if you have fast 32-bit types on your system */
00049 #if ( __CORTEX_M != 0 ) // true for any core other than Cortex-M0/M0+
00050 #  define HAVE_UINT_32T
00051 #endif
00052 
00053 /* define to use the precomputed S-box and finite-field tables */
00054 #if 1
00055 #  define USE_TABLES
00056 #endif
00057 
00058 /*  On an Intel Core 2 Duo, VERSION_1 is faster */
00059 
00060 /* alternative versions (test for performance on your system) */
00061 #if 1
00062 #  define VERSION_1
00063 #endif
00064 
00065 #include "aes.h"
00066 
00067 //#if defined( HAVE_UINT_32T )
00068 //  typedef unsigned long uint32_t;
00069 //#endif
00070 
00071 /* functions for finite field multiplication in the AES Galois field    */
00072 
00073 #define WPOLY   0x011b
00074 #define BPOLY     0x1b
00075 #define DPOLY   0x008d
00076 
00077 #define f1(x)   (x)
00078 #define f2(x)   ((x << 1) ^ (((x >> 7) & 1) * WPOLY))
00079 #define f4(x)   ((x << 2) ^ (((x >> 6) & 1) * WPOLY) ^ (((x >> 6) & 2) * WPOLY))
00080 #define f8(x)   ((x << 3) ^ (((x >> 5) & 1) * WPOLY) ^ (((x >> 5) & 2) * WPOLY) \
00081                           ^ (((x >> 5) & 4) * WPOLY))
00082 #define d2(x)   (((x) >> 1) ^ ((x) & 1 ? DPOLY : 0))
00083 
00084 #define f3(x)   (f2(x) ^ x)
00085 #define f9(x)   (f8(x) ^ x)
00086 #define fb(x)   (f8(x) ^ f2(x) ^ x)
00087 #define fd(x)   (f8(x) ^ f4(x) ^ x)
00088 #define fe(x)   (f8(x) ^ f4(x) ^ f2(x))
00089 
00090 #if defined( USE_TABLES )
00091 
00092 #define sb_data(w) {    /* S Box data values */                            \
00093     w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
00094     w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
00095     w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
00096     w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
00097     w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
00098     w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
00099     w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
00100     w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
00101     w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
00102     w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
00103     w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
00104     w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
00105     w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
00106     w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
00107     w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
00108     w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
00109     w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
00110     w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
00111     w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
00112     w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
00113     w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
00114     w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
00115     w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
00116     w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
00117     w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
00118     w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
00119     w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
00120     w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
00121     w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
00122     w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
00123     w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
00124     w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
00125 
00126 #define isb_data(w) {   /* inverse S Box data values */                    \
00127     w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38),\
00128     w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb),\
00129     w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87),\
00130     w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb),\
00131     w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d),\
00132     w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e),\
00133     w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2),\
00134     w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25),\
00135     w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16),\
00136     w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92),\
00137     w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda),\
00138     w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84),\
00139     w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a),\
00140     w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06),\
00141     w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02),\
00142     w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b),\
00143     w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea),\
00144     w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73),\
00145     w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85),\
00146     w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e),\
00147     w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89),\
00148     w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b),\
00149     w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20),\
00150     w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4),\
00151     w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31),\
00152     w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f),\
00153     w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d),\
00154     w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef),\
00155     w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0),\
00156     w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61),\
00157     w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26),\
00158     w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d) }
00159 
00160 #define mm_data(w) {    /* basic data for forming finite field tables */   \
00161     w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07),\
00162     w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f),\
00163     w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17),\
00164     w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f),\
00165     w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27),\
00166     w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f),\
00167     w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37),\
00168     w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f),\
00169     w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47),\
00170     w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f),\
00171     w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57),\
00172     w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f),\
00173     w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67),\
00174     w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f),\
00175     w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77),\
00176     w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f),\
00177     w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87),\
00178     w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f),\
00179     w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97),\
00180     w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f),\
00181     w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7),\
00182     w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf),\
00183     w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7),\
00184     w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf),\
00185     w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7),\
00186     w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf),\
00187     w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7),\
00188     w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), w(0xdd), w(0xde), w(0xdf),\
00189     w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7),\
00190     w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef),\
00191     w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7),\
00192     w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff) }
00193 
00194 static const uint8_t sbox[256]  =  sb_data(f1);
00195 
00196 #if defined( AES_DEC_PREKEYED )
00197 static const uint8_t isbox[256] = isb_data(f1);
00198 #endif
00199 
00200 static const uint8_t gfm2_sbox[256] = sb_data(f2);
00201 static const uint8_t gfm3_sbox[256] = sb_data(f3);
00202 
00203 #if defined( AES_DEC_PREKEYED )
00204 static const uint8_t gfmul_9[256] = mm_data(f9);
00205 static const uint8_t gfmul_b[256] = mm_data(fb);
00206 static const uint8_t gfmul_d[256] = mm_data(fd);
00207 static const uint8_t gfmul_e[256] = mm_data(fe);
00208 #endif
00209 
00210 #define s_box(x)     sbox[(x)]
00211 #if defined( AES_DEC_PREKEYED )
00212 #define is_box(x)    isbox[(x)]
00213 #endif
00214 #define gfm2_sb(x)   gfm2_sbox[(x)]
00215 #define gfm3_sb(x)   gfm3_sbox[(x)]
00216 #if defined( AES_DEC_PREKEYED )
00217 #define gfm_9(x)     gfmul_9[(x)]
00218 #define gfm_b(x)     gfmul_b[(x)]
00219 #define gfm_d(x)     gfmul_d[(x)]
00220 #define gfm_e(x)     gfmul_e[(x)]
00221 #endif
00222 #else
00223 
00224 /* this is the high bit of x right shifted by 1 */
00225 /* position. Since the starting polynomial has  */
00226 /* 9 bits (0x11b), this right shift keeps the   */
00227 /* values of all top bits within a byte         */
00228 
00229 static uint8_t hibit(const uint8_t x)
00230 {   uint8_t r = (uint8_t)((x >> 1) | (x >> 2));
00231 
00232     r |= (r >> 2);
00233     r |= (r >> 4);
00234     return (r + 1) >> 1;
00235 }
00236 
00237 /* return the inverse of the finite field element x */
00238 
00239 static uint8_t gf_inv(const uint8_t x)
00240 {   uint8_t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0;
00241 
00242     if(x < 2)
00243         return x;
00244 
00245     for( ; ; )
00246     {
00247         if(n1)
00248             while(n2 >= n1)             /* divide polynomial p2 by p1    */
00249             {
00250                 n2 /= n1;               /* shift smaller polynomial left */
00251                 p2 ^= (p1 * n2) & 0xff; /* and remove from larger one    */
00252                 v2 ^= (v1 * n2);        /* shift accumulated value and   */
00253                 n2 = hibit(p2);         /* add into result               */
00254             }
00255         else
00256             return v1;
00257 
00258         if(n2)                          /* repeat with values swapped    */
00259             while(n1 >= n2)
00260             {
00261                 n1 /= n2;
00262                 p1 ^= p2 * n1;
00263                 v1 ^= v2 * n1;
00264                 n1 = hibit(p1);
00265             }
00266         else
00267             return v2;
00268     }
00269 }
00270 
00271 /* The forward and inverse affine transformations used in the S-box */
00272 uint8_t fwd_affine(const uint8_t x)
00273 {
00274 #if defined( HAVE_UINT_32T )
00275     uint32_t w = x;
00276     w ^= (w << 1) ^ (w << 2) ^ (w << 3) ^ (w << 4);
00277     return 0x63 ^ ((w ^ (w >> 8)) & 0xff);
00278 #else
00279     return 0x63 ^ x ^ (x << 1) ^ (x << 2) ^ (x << 3) ^ (x << 4)
00280                     ^ (x >> 7) ^ (x >> 6) ^ (x >> 5) ^ (x >> 4);
00281 #endif
00282 }
00283 
00284 uint8_t inv_affine(const uint8_t x)
00285 {
00286 #if defined( HAVE_UINT_32T )
00287     uint32_t w = x;
00288     w = (w << 1) ^ (w << 3) ^ (w << 6);
00289     return 0x05 ^ ((w ^ (w >> 8)) & 0xff);
00290 #else
00291     return 0x05 ^ (x << 1) ^ (x << 3) ^ (x << 6)
00292                 ^ (x >> 7) ^ (x >> 5) ^ (x >> 2);
00293 #endif
00294 }
00295 
00296 #define s_box(x)   fwd_affine(gf_inv(x))
00297 #define is_box(x)  gf_inv(inv_affine(x))
00298 #define gfm2_sb(x) f2(s_box(x))
00299 #define gfm3_sb(x) f3(s_box(x))
00300 #define gfm_9(x)   f9(x)
00301 #define gfm_b(x)   fb(x)
00302 #define gfm_d(x)   fd(x)
00303 #define gfm_e(x)   fe(x)
00304 
00305 #endif
00306 
00307 #if defined( HAVE_MEMCPY )
00308 #  define block_copy_nn(d, s, l)    memcpy(d, s, l)
00309 #  define block_copy(d, s)          memcpy(d, s, N_BLOCK)
00310 #else
00311 #  define block_copy_nn(d, s, l)    copy_block_nn(d, s, l)
00312 #  define block_copy(d, s)          copy_block(d, s)
00313 #endif
00314 
00315 static void copy_block( void *d, const void *s )
00316 {
00317 #if defined( HAVE_UINT_32T )
00318     ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ 0];
00319     ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ 1];
00320     ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ 2];
00321     ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ 3];
00322 #else
00323     ((uint8_t*)d)[ 0] = ((uint8_t*)s)[ 0];
00324     ((uint8_t*)d)[ 1] = ((uint8_t*)s)[ 1];
00325     ((uint8_t*)d)[ 2] = ((uint8_t*)s)[ 2];
00326     ((uint8_t*)d)[ 3] = ((uint8_t*)s)[ 3];
00327     ((uint8_t*)d)[ 4] = ((uint8_t*)s)[ 4];
00328     ((uint8_t*)d)[ 5] = ((uint8_t*)s)[ 5];
00329     ((uint8_t*)d)[ 6] = ((uint8_t*)s)[ 6];
00330     ((uint8_t*)d)[ 7] = ((uint8_t*)s)[ 7];
00331     ((uint8_t*)d)[ 8] = ((uint8_t*)s)[ 8];
00332     ((uint8_t*)d)[ 9] = ((uint8_t*)s)[ 9];
00333     ((uint8_t*)d)[10] = ((uint8_t*)s)[10];
00334     ((uint8_t*)d)[11] = ((uint8_t*)s)[11];
00335     ((uint8_t*)d)[12] = ((uint8_t*)s)[12];
00336     ((uint8_t*)d)[13] = ((uint8_t*)s)[13];
00337     ((uint8_t*)d)[14] = ((uint8_t*)s)[14];
00338     ((uint8_t*)d)[15] = ((uint8_t*)s)[15];
00339 #endif
00340 }
00341 
00342 static void copy_block_nn( uint8_t * d, const uint8_t *s, uint8_t nn )
00343 {
00344     while( nn-- )
00345         //*((uint8_t*)d)++ = *((uint8_t*)s)++;
00346         *d++ = *s++;
00347 }
00348 
00349 static void xor_block( void *d, const void *s )
00350 {
00351 #if defined( HAVE_UINT_32T )
00352     ((uint32_t*)d)[ 0] ^= ((uint32_t*)s)[ 0];
00353     ((uint32_t*)d)[ 1] ^= ((uint32_t*)s)[ 1];
00354     ((uint32_t*)d)[ 2] ^= ((uint32_t*)s)[ 2];
00355     ((uint32_t*)d)[ 3] ^= ((uint32_t*)s)[ 3];
00356 #else
00357     ((uint8_t*)d)[ 0] ^= ((uint8_t*)s)[ 0];
00358     ((uint8_t*)d)[ 1] ^= ((uint8_t*)s)[ 1];
00359     ((uint8_t*)d)[ 2] ^= ((uint8_t*)s)[ 2];
00360     ((uint8_t*)d)[ 3] ^= ((uint8_t*)s)[ 3];
00361     ((uint8_t*)d)[ 4] ^= ((uint8_t*)s)[ 4];
00362     ((uint8_t*)d)[ 5] ^= ((uint8_t*)s)[ 5];
00363     ((uint8_t*)d)[ 6] ^= ((uint8_t*)s)[ 6];
00364     ((uint8_t*)d)[ 7] ^= ((uint8_t*)s)[ 7];
00365     ((uint8_t*)d)[ 8] ^= ((uint8_t*)s)[ 8];
00366     ((uint8_t*)d)[ 9] ^= ((uint8_t*)s)[ 9];
00367     ((uint8_t*)d)[10] ^= ((uint8_t*)s)[10];
00368     ((uint8_t*)d)[11] ^= ((uint8_t*)s)[11];
00369     ((uint8_t*)d)[12] ^= ((uint8_t*)s)[12];
00370     ((uint8_t*)d)[13] ^= ((uint8_t*)s)[13];
00371     ((uint8_t*)d)[14] ^= ((uint8_t*)s)[14];
00372     ((uint8_t*)d)[15] ^= ((uint8_t*)s)[15];
00373 #endif
00374 }
00375 
00376 static void copy_and_key( void *d, const void *s, const void *k )
00377 {
00378 #if defined( HAVE_UINT_32T )
00379     ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ 0] ^ ((uint32_t*)k)[ 0];
00380     ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ 1] ^ ((uint32_t*)k)[ 1];
00381     ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ 2] ^ ((uint32_t*)k)[ 2];
00382     ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ 3] ^ ((uint32_t*)k)[ 3];
00383 #elif 1
00384     ((uint8_t*)d)[ 0] = ((uint8_t*)s)[ 0] ^ ((uint8_t*)k)[ 0];
00385     ((uint8_t*)d)[ 1] = ((uint8_t*)s)[ 1] ^ ((uint8_t*)k)[ 1];
00386     ((uint8_t*)d)[ 2] = ((uint8_t*)s)[ 2] ^ ((uint8_t*)k)[ 2];
00387     ((uint8_t*)d)[ 3] = ((uint8_t*)s)[ 3] ^ ((uint8_t*)k)[ 3];
00388     ((uint8_t*)d)[ 4] = ((uint8_t*)s)[ 4] ^ ((uint8_t*)k)[ 4];
00389     ((uint8_t*)d)[ 5] = ((uint8_t*)s)[ 5] ^ ((uint8_t*)k)[ 5];
00390     ((uint8_t*)d)[ 6] = ((uint8_t*)s)[ 6] ^ ((uint8_t*)k)[ 6];
00391     ((uint8_t*)d)[ 7] = ((uint8_t*)s)[ 7] ^ ((uint8_t*)k)[ 7];
00392     ((uint8_t*)d)[ 8] = ((uint8_t*)s)[ 8] ^ ((uint8_t*)k)[ 8];
00393     ((uint8_t*)d)[ 9] = ((uint8_t*)s)[ 9] ^ ((uint8_t*)k)[ 9];
00394     ((uint8_t*)d)[10] = ((uint8_t*)s)[10] ^ ((uint8_t*)k)[10];
00395     ((uint8_t*)d)[11] = ((uint8_t*)s)[11] ^ ((uint8_t*)k)[11];
00396     ((uint8_t*)d)[12] = ((uint8_t*)s)[12] ^ ((uint8_t*)k)[12];
00397     ((uint8_t*)d)[13] = ((uint8_t*)s)[13] ^ ((uint8_t*)k)[13];
00398     ((uint8_t*)d)[14] = ((uint8_t*)s)[14] ^ ((uint8_t*)k)[14];
00399     ((uint8_t*)d)[15] = ((uint8_t*)s)[15] ^ ((uint8_t*)k)[15];
00400 #else
00401     block_copy(d, s);
00402     xor_block(d, k);
00403 #endif
00404 }
00405 
00406 static void add_round_key( uint8_t d[N_BLOCK], const uint8_t k[N_BLOCK] )
00407 {
00408     xor_block(d, k);
00409 }
00410 
00411 static void shift_sub_rows( uint8_t st[N_BLOCK] )
00412 {   uint8_t tt;
00413 
00414     st[ 0] = s_box(st[ 0]); st[ 4] = s_box(st[ 4]);
00415     st[ 8] = s_box(st[ 8]); st[12] = s_box(st[12]);
00416 
00417     tt = st[1]; st[ 1] = s_box(st[ 5]); st[ 5] = s_box(st[ 9]);
00418     st[ 9] = s_box(st[13]); st[13] = s_box( tt );
00419 
00420     tt = st[2]; st[ 2] = s_box(st[10]); st[10] = s_box( tt );
00421     tt = st[6]; st[ 6] = s_box(st[14]); st[14] = s_box( tt );
00422 
00423     tt = st[15]; st[15] = s_box(st[11]); st[11] = s_box(st[ 7]);
00424     st[ 7] = s_box(st[ 3]); st[ 3] = s_box( tt );
00425 }
00426 
00427 #if defined( AES_DEC_PREKEYED )
00428 
00429 static void inv_shift_sub_rows( uint8_t st[N_BLOCK] )
00430 {   uint8_t tt;
00431 
00432     st[ 0] = is_box(st[ 0]); st[ 4] = is_box(st[ 4]);
00433     st[ 8] = is_box(st[ 8]); st[12] = is_box(st[12]);
00434 
00435     tt = st[13]; st[13] = is_box(st[9]); st[ 9] = is_box(st[5]);
00436     st[ 5] = is_box(st[1]); st[ 1] = is_box( tt );
00437 
00438     tt = st[2]; st[ 2] = is_box(st[10]); st[10] = is_box( tt );
00439     tt = st[6]; st[ 6] = is_box(st[14]); st[14] = is_box( tt );
00440 
00441     tt = st[3]; st[ 3] = is_box(st[ 7]); st[ 7] = is_box(st[11]);
00442     st[11] = is_box(st[15]); st[15] = is_box( tt );
00443 }
00444 
00445 #endif
00446 
00447 #if defined( VERSION_1 )
00448   static void mix_sub_columns( uint8_t dt[N_BLOCK] )
00449   { uint8_t st[N_BLOCK];
00450     block_copy(st, dt);
00451 #else
00452   static void mix_sub_columns( uint8_t dt[N_BLOCK], uint8_t st[N_BLOCK] )
00453   {
00454 #endif
00455     dt[ 0] = gfm2_sb(st[0]) ^ gfm3_sb(st[5]) ^ s_box(st[10]) ^ s_box(st[15]);
00456     dt[ 1] = s_box(st[0]) ^ gfm2_sb(st[5]) ^ gfm3_sb(st[10]) ^ s_box(st[15]);
00457     dt[ 2] = s_box(st[0]) ^ s_box(st[5]) ^ gfm2_sb(st[10]) ^ gfm3_sb(st[15]);
00458     dt[ 3] = gfm3_sb(st[0]) ^ s_box(st[5]) ^ s_box(st[10]) ^ gfm2_sb(st[15]);
00459 
00460     dt[ 4] = gfm2_sb(st[4]) ^ gfm3_sb(st[9]) ^ s_box(st[14]) ^ s_box(st[3]);
00461     dt[ 5] = s_box(st[4]) ^ gfm2_sb(st[9]) ^ gfm3_sb(st[14]) ^ s_box(st[3]);
00462     dt[ 6] = s_box(st[4]) ^ s_box(st[9]) ^ gfm2_sb(st[14]) ^ gfm3_sb(st[3]);
00463     dt[ 7] = gfm3_sb(st[4]) ^ s_box(st[9]) ^ s_box(st[14]) ^ gfm2_sb(st[3]);
00464 
00465     dt[ 8] = gfm2_sb(st[8]) ^ gfm3_sb(st[13]) ^ s_box(st[2]) ^ s_box(st[7]);
00466     dt[ 9] = s_box(st[8]) ^ gfm2_sb(st[13]) ^ gfm3_sb(st[2]) ^ s_box(st[7]);
00467     dt[10] = s_box(st[8]) ^ s_box(st[13]) ^ gfm2_sb(st[2]) ^ gfm3_sb(st[7]);
00468     dt[11] = gfm3_sb(st[8]) ^ s_box(st[13]) ^ s_box(st[2]) ^ gfm2_sb(st[7]);
00469 
00470     dt[12] = gfm2_sb(st[12]) ^ gfm3_sb(st[1]) ^ s_box(st[6]) ^ s_box(st[11]);
00471     dt[13] = s_box(st[12]) ^ gfm2_sb(st[1]) ^ gfm3_sb(st[6]) ^ s_box(st[11]);
00472     dt[14] = s_box(st[12]) ^ s_box(st[1]) ^ gfm2_sb(st[6]) ^ gfm3_sb(st[11]);
00473     dt[15] = gfm3_sb(st[12]) ^ s_box(st[1]) ^ s_box(st[6]) ^ gfm2_sb(st[11]);
00474   }
00475 
00476 #if defined( AES_DEC_PREKEYED )
00477 
00478 #if defined( VERSION_1 )
00479   static void inv_mix_sub_columns( uint8_t dt[N_BLOCK] )
00480   { uint8_t st[N_BLOCK];
00481     block_copy(st, dt);
00482 #else
00483   static void inv_mix_sub_columns( uint8_t dt[N_BLOCK], uint8_t st[N_BLOCK] )
00484   {
00485 #endif
00486     dt[ 0] = is_box(gfm_e(st[ 0]) ^ gfm_b(st[ 1]) ^ gfm_d(st[ 2]) ^ gfm_9(st[ 3]));
00487     dt[ 5] = is_box(gfm_9(st[ 0]) ^ gfm_e(st[ 1]) ^ gfm_b(st[ 2]) ^ gfm_d(st[ 3]));
00488     dt[10] = is_box(gfm_d(st[ 0]) ^ gfm_9(st[ 1]) ^ gfm_e(st[ 2]) ^ gfm_b(st[ 3]));
00489     dt[15] = is_box(gfm_b(st[ 0]) ^ gfm_d(st[ 1]) ^ gfm_9(st[ 2]) ^ gfm_e(st[ 3]));
00490 
00491     dt[ 4] = is_box(gfm_e(st[ 4]) ^ gfm_b(st[ 5]) ^ gfm_d(st[ 6]) ^ gfm_9(st[ 7]));
00492     dt[ 9] = is_box(gfm_9(st[ 4]) ^ gfm_e(st[ 5]) ^ gfm_b(st[ 6]) ^ gfm_d(st[ 7]));
00493     dt[14] = is_box(gfm_d(st[ 4]) ^ gfm_9(st[ 5]) ^ gfm_e(st[ 6]) ^ gfm_b(st[ 7]));
00494     dt[ 3] = is_box(gfm_b(st[ 4]) ^ gfm_d(st[ 5]) ^ gfm_9(st[ 6]) ^ gfm_e(st[ 7]));
00495 
00496     dt[ 8] = is_box(gfm_e(st[ 8]) ^ gfm_b(st[ 9]) ^ gfm_d(st[10]) ^ gfm_9(st[11]));
00497     dt[13] = is_box(gfm_9(st[ 8]) ^ gfm_e(st[ 9]) ^ gfm_b(st[10]) ^ gfm_d(st[11]));
00498     dt[ 2] = is_box(gfm_d(st[ 8]) ^ gfm_9(st[ 9]) ^ gfm_e(st[10]) ^ gfm_b(st[11]));
00499     dt[ 7] = is_box(gfm_b(st[ 8]) ^ gfm_d(st[ 9]) ^ gfm_9(st[10]) ^ gfm_e(st[11]));
00500 
00501     dt[12] = is_box(gfm_e(st[12]) ^ gfm_b(st[13]) ^ gfm_d(st[14]) ^ gfm_9(st[15]));
00502     dt[ 1] = is_box(gfm_9(st[12]) ^ gfm_e(st[13]) ^ gfm_b(st[14]) ^ gfm_d(st[15]));
00503     dt[ 6] = is_box(gfm_d(st[12]) ^ gfm_9(st[13]) ^ gfm_e(st[14]) ^ gfm_b(st[15]));
00504     dt[11] = is_box(gfm_b(st[12]) ^ gfm_d(st[13]) ^ gfm_9(st[14]) ^ gfm_e(st[15]));
00505   }
00506 
00507 #endif
00508 
00509 #if defined( AES_ENC_PREKEYED ) || defined( AES_DEC_PREKEYED )
00510 
00511 /*  Set the cipher key for the pre-keyed version */
00512 
00513 return_type aes_set_key( const uint8_t key[], length_type keylen, aes_context ctx[1] )
00514 {
00515     uint8_t cc, rc, hi;
00516 
00517     switch( keylen )
00518     {
00519     case 16:
00520     case 24:
00521     case 32:
00522         break;
00523     default:
00524         ctx->rnd = 0;
00525         return ( uint8_t )-1;
00526     }
00527     block_copy_nn(ctx->ksch, key, keylen);
00528     hi = (keylen + 28) << 2;
00529     ctx->rnd = (hi >> 4) - 1;
00530     for( cc = keylen, rc = 1; cc < hi; cc += 4 )
00531     {   uint8_t tt, t0, t1, t2, t3;
00532 
00533         t0 = ctx->ksch[cc - 4];
00534         t1 = ctx->ksch[cc - 3];
00535         t2 = ctx->ksch[cc - 2];
00536         t3 = ctx->ksch[cc - 1];
00537         if( cc % keylen == 0 )
00538         {
00539             tt = t0;
00540             t0 = s_box(t1) ^ rc;
00541             t1 = s_box(t2);
00542             t2 = s_box(t3);
00543             t3 = s_box(tt);
00544             rc = f2(rc);
00545         }
00546         else if( keylen > 24 && cc % keylen == 16 )
00547         {
00548             t0 = s_box(t0);
00549             t1 = s_box(t1);
00550             t2 = s_box(t2);
00551             t3 = s_box(t3);
00552         }
00553         tt = cc - keylen;
00554         ctx->ksch[cc + 0] = ctx->ksch[tt + 0] ^ t0;
00555         ctx->ksch[cc + 1] = ctx->ksch[tt + 1] ^ t1;
00556         ctx->ksch[cc + 2] = ctx->ksch[tt + 2] ^ t2;
00557         ctx->ksch[cc + 3] = ctx->ksch[tt + 3] ^ t3;
00558     }
00559     return 0;
00560 }
00561 
00562 #endif
00563 
00564 #if defined( AES_ENC_PREKEYED )
00565 
00566 /*  Encrypt a single block of 16 bytes */
00567 
00568 return_type aes_encrypt( const uint8_t in[N_BLOCK], uint8_t  out[N_BLOCK], const aes_context ctx[1] )
00569 {
00570     if( ctx->rnd )
00571     {
00572         uint8_t s1[N_BLOCK], r;
00573         copy_and_key( s1, in, ctx->ksch );
00574 
00575         for( r = 1 ; r < ctx->rnd ; ++r )
00576 #if defined( VERSION_1 )
00577         {
00578             mix_sub_columns( s1 );
00579             add_round_key( s1, ctx->ksch + r * N_BLOCK);
00580         }
00581 #else
00582         {   uint8_t s2[N_BLOCK];
00583             mix_sub_columns( s2, s1 );
00584             copy_and_key( s1, s2, ctx->ksch + r * N_BLOCK);
00585         }
00586 #endif
00587         shift_sub_rows( s1 );
00588         copy_and_key( out, s1, ctx->ksch + r * N_BLOCK );
00589     }
00590     else
00591         return ( uint8_t )-1;
00592     return 0;
00593 }
00594 
00595 /* CBC encrypt a number of blocks (input and return an IV) */
00596 
00597 return_type aes_cbc_encrypt( const uint8_t *in, uint8_t *out,
00598                          int32_t n_block, uint8_t iv[N_BLOCK], const aes_context ctx[1] )
00599 {
00600 
00601     while(n_block--)
00602     {
00603         xor_block(iv, in);
00604         if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
00605             return EXIT_FAILURE;
00606         //memcpy(out, iv, N_BLOCK);
00607         block_copy(out, iv);
00608         in += N_BLOCK;
00609         out += N_BLOCK;
00610     }
00611     return EXIT_SUCCESS;
00612 }
00613 
00614 #endif
00615 
00616 #if defined( AES_DEC_PREKEYED )
00617 
00618 /*  Decrypt a single block of 16 bytes */
00619 
00620 return_type aes_decrypt( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK], const aes_context ctx[1] )
00621 {
00622     if( ctx->rnd )
00623     {
00624         uint8_t s1[N_BLOCK], r;
00625         copy_and_key( s1, in, ctx->ksch + ctx->rnd * N_BLOCK );
00626         inv_shift_sub_rows( s1 );
00627 
00628         for( r = ctx->rnd ; --r ; )
00629 #if defined( VERSION_1 )
00630         {
00631             add_round_key( s1, ctx->ksch + r * N_BLOCK );
00632             inv_mix_sub_columns( s1 );
00633         }
00634 #else
00635         {   uint8_t s2[N_BLOCK];
00636             copy_and_key( s2, s1, ctx->ksch + r * N_BLOCK );
00637             inv_mix_sub_columns( s1, s2 );
00638         }
00639 #endif
00640         copy_and_key( out, s1, ctx->ksch );
00641     }
00642     else
00643         return -1;
00644     return 0;
00645 }
00646 
00647 /* CBC decrypt a number of blocks (input and return an IV) */
00648 
00649 return_type aes_cbc_decrypt( const uint8_t *in, uint8_t *out,
00650                          int32_t n_block, uint8_t iv[N_BLOCK], const aes_context ctx[1] )
00651 {
00652     while(n_block--)
00653     {   uint8_t tmp[N_BLOCK];
00654 
00655         //memcpy(tmp, in, N_BLOCK);
00656         block_copy(tmp, in);
00657         if(aes_decrypt(in, out, ctx) != EXIT_SUCCESS)
00658             return EXIT_FAILURE;
00659         xor_block(out, iv);
00660         //memcpy(iv, tmp, N_BLOCK);
00661         block_copy(iv, tmp);
00662         in += N_BLOCK;
00663         out += N_BLOCK;
00664     }
00665     return EXIT_SUCCESS;
00666 }
00667 
00668 #endif
00669 
00670 #if defined( AES_ENC_128_OTFK )
00671 
00672 /*  The 'on the fly' encryption key update for 128 bit keys */
00673 
00674 static void update_encrypt_key_128( uint8_t k[N_BLOCK], uint8_t *rc )
00675 {   uint8_t cc;
00676 
00677     k[0] ^= s_box(k[13]) ^ *rc;
00678     k[1] ^= s_box(k[14]);
00679     k[2] ^= s_box(k[15]);
00680     k[3] ^= s_box(k[12]);
00681     *rc = f2( *rc );
00682 
00683     for(cc = 4; cc < 16; cc += 4 )
00684     {
00685         k[cc + 0] ^= k[cc - 4];
00686         k[cc + 1] ^= k[cc - 3];
00687         k[cc + 2] ^= k[cc - 2];
00688         k[cc + 3] ^= k[cc - 1];
00689     }
00690 }
00691 
00692 /*  Encrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
00693 
00694 void aes_encrypt_128( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00695                      const uint8_t key[N_BLOCK], uint8_t o_key[N_BLOCK] )
00696 {   uint8_t s1[N_BLOCK], r, rc = 1;
00697 
00698     if(o_key != key)
00699         block_copy( o_key, key );
00700     copy_and_key( s1, in, o_key );
00701 
00702     for( r = 1 ; r < 10 ; ++r )
00703 #if defined( VERSION_1 )
00704     {
00705         mix_sub_columns( s1 );
00706         update_encrypt_key_128( o_key, &rc );
00707         add_round_key( s1, o_key );
00708     }
00709 #else
00710     {   uint8_t s2[N_BLOCK];
00711         mix_sub_columns( s2, s1 );
00712         update_encrypt_key_128( o_key, &rc );
00713         copy_and_key( s1, s2, o_key );
00714     }
00715 #endif
00716 
00717     shift_sub_rows( s1 );
00718     update_encrypt_key_128( o_key, &rc );
00719     copy_and_key( out, s1, o_key );
00720 }
00721 
00722 #endif
00723 
00724 #if defined( AES_DEC_128_OTFK )
00725 
00726 /*  The 'on the fly' decryption key update for 128 bit keys */
00727 
00728 static void update_decrypt_key_128( uint8_t k[N_BLOCK], uint8_t *rc )
00729 {   uint8_t cc;
00730 
00731     for( cc = 12; cc > 0; cc -= 4 )
00732     {
00733         k[cc + 0] ^= k[cc - 4];
00734         k[cc + 1] ^= k[cc - 3];
00735         k[cc + 2] ^= k[cc - 2];
00736         k[cc + 3] ^= k[cc - 1];
00737     }
00738     *rc = d2(*rc);
00739     k[0] ^= s_box(k[13]) ^ *rc;
00740     k[1] ^= s_box(k[14]);
00741     k[2] ^= s_box(k[15]);
00742     k[3] ^= s_box(k[12]);
00743 }
00744 
00745 /*  Decrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
00746 
00747 void aes_decrypt_128( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00748                       const uint8_t key[N_BLOCK], uint8_t o_key[N_BLOCK] )
00749 {
00750     uint8_t s1[N_BLOCK], r, rc = 0x6c;
00751     if(o_key != key)
00752         block_copy( o_key, key );
00753 
00754     copy_and_key( s1, in, o_key );
00755     inv_shift_sub_rows( s1 );
00756 
00757     for( r = 10 ; --r ; )
00758 #if defined( VERSION_1 )
00759     {
00760         update_decrypt_key_128( o_key, &rc );
00761         add_round_key( s1, o_key );
00762         inv_mix_sub_columns( s1 );
00763     }
00764 #else
00765     {   uint8_t s2[N_BLOCK];
00766         update_decrypt_key_128( o_key, &rc );
00767         copy_and_key( s2, s1, o_key );
00768         inv_mix_sub_columns( s1, s2 );
00769     }
00770 #endif
00771     update_decrypt_key_128( o_key, &rc );
00772     copy_and_key( out, s1, o_key );
00773 }
00774 
00775 #endif
00776 
00777 #if defined( AES_ENC_256_OTFK )
00778 
00779 /*  The 'on the fly' encryption key update for 256 bit keys */
00780 
00781 static void update_encrypt_key_256( uint8_t k[2 * N_BLOCK], uint8_t *rc )
00782 {   uint8_t cc;
00783 
00784     k[0] ^= s_box(k[29]) ^ *rc;
00785     k[1] ^= s_box(k[30]);
00786     k[2] ^= s_box(k[31]);
00787     k[3] ^= s_box(k[28]);
00788     *rc = f2( *rc );
00789 
00790     for(cc = 4; cc < 16; cc += 4)
00791     {
00792         k[cc + 0] ^= k[cc - 4];
00793         k[cc + 1] ^= k[cc - 3];
00794         k[cc + 2] ^= k[cc - 2];
00795         k[cc + 3] ^= k[cc - 1];
00796     }
00797 
00798     k[16] ^= s_box(k[12]);
00799     k[17] ^= s_box(k[13]);
00800     k[18] ^= s_box(k[14]);
00801     k[19] ^= s_box(k[15]);
00802 
00803     for( cc = 20; cc < 32; cc += 4 )
00804     {
00805         k[cc + 0] ^= k[cc - 4];
00806         k[cc + 1] ^= k[cc - 3];
00807         k[cc + 2] ^= k[cc - 2];
00808         k[cc + 3] ^= k[cc - 1];
00809     }
00810 }
00811 
00812 /*  Encrypt a single block of 16 bytes with 'on the fly' 256 bit keying */
00813 
00814 void aes_encrypt_256( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00815                       const uint8_t key[2 * N_BLOCK], uint8_t o_key[2 * N_BLOCK] )
00816 {
00817     uint8_t s1[N_BLOCK], r, rc = 1;
00818     if(o_key != key)
00819     {
00820         block_copy( o_key, key );
00821         block_copy( o_key + 16, key + 16 );
00822     }
00823     copy_and_key( s1, in, o_key );
00824 
00825     for( r = 1 ; r < 14 ; ++r )
00826 #if defined( VERSION_1 )
00827     {
00828         mix_sub_columns(s1);
00829         if( r & 1 )
00830             add_round_key( s1, o_key + 16 );
00831         else
00832         {
00833             update_encrypt_key_256( o_key, &rc );
00834             add_round_key( s1, o_key );
00835         }
00836     }
00837 #else
00838     {   uint8_t s2[N_BLOCK];
00839         mix_sub_columns( s2, s1 );
00840         if( r & 1 )
00841             copy_and_key( s1, s2, o_key + 16 );
00842         else
00843         {
00844             update_encrypt_key_256( o_key, &rc );
00845             copy_and_key( s1, s2, o_key );
00846         }
00847     }
00848 #endif
00849 
00850     shift_sub_rows( s1 );
00851     update_encrypt_key_256( o_key, &rc );
00852     copy_and_key( out, s1, o_key );
00853 }
00854 
00855 #endif
00856 
00857 #if defined( AES_DEC_256_OTFK )
00858 
00859 /*  The 'on the fly' decryption key update for 256 bit keys */
00860 
00861 static void update_decrypt_key_256( uint8_t k[2 * N_BLOCK], uint8_t *rc )
00862 {   uint8_t cc;
00863 
00864     for(cc = 28; cc > 16; cc -= 4)
00865     {
00866         k[cc + 0] ^= k[cc - 4];
00867         k[cc + 1] ^= k[cc - 3];
00868         k[cc + 2] ^= k[cc - 2];
00869         k[cc + 3] ^= k[cc - 1];
00870     }
00871 
00872     k[16] ^= s_box(k[12]);
00873     k[17] ^= s_box(k[13]);
00874     k[18] ^= s_box(k[14]);
00875     k[19] ^= s_box(k[15]);
00876 
00877     for(cc = 12; cc > 0; cc -= 4)
00878     {
00879         k[cc + 0] ^= k[cc - 4];
00880         k[cc + 1] ^= k[cc - 3];
00881         k[cc + 2] ^= k[cc - 2];
00882         k[cc + 3] ^= k[cc - 1];
00883     }
00884 
00885     *rc = d2(*rc);
00886     k[0] ^= s_box(k[29]) ^ *rc;
00887     k[1] ^= s_box(k[30]);
00888     k[2] ^= s_box(k[31]);
00889     k[3] ^= s_box(k[28]);
00890 }
00891 
00892 /*  Decrypt a single block of 16 bytes with 'on the fly'
00893     256 bit keying
00894 */
00895 void aes_decrypt_256( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00896                       const uint8_t key[2 * N_BLOCK], uint8_t o_key[2 * N_BLOCK] )
00897 {
00898     uint8_t s1[N_BLOCK], r, rc = 0x80;
00899 
00900     if(o_key != key)
00901     {
00902         block_copy( o_key, key );
00903         block_copy( o_key + 16, key + 16 );
00904     }
00905 
00906     copy_and_key( s1, in, o_key );
00907     inv_shift_sub_rows( s1 );
00908 
00909     for( r = 14 ; --r ; )
00910 #if defined( VERSION_1 )
00911     {
00912         if( ( r & 1 ) )
00913         {
00914             update_decrypt_key_256( o_key, &rc );
00915             add_round_key( s1, o_key + 16 );
00916         }
00917         else
00918             add_round_key( s1, o_key );
00919         inv_mix_sub_columns( s1 );
00920     }
00921 #else
00922     {   uint8_t s2[N_BLOCK];
00923         if( ( r & 1 ) )
00924         {
00925             update_decrypt_key_256( o_key, &rc );
00926             copy_and_key( s2, s1, o_key + 16 );
00927         }
00928         else
00929             copy_and_key( s2, s1, o_key );
00930         inv_mix_sub_columns( s1, s2 );
00931     }
00932 #endif
00933     copy_and_key( out, s1, o_key );
00934 }
00935 
00936 #endif
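
Usage note: in the CBC helpers above, the IV buffer is both an input and an output: it is updated in place so that consecutive calls chain correctly, and n_block counts whole 16-byte blocks, with no padding performed. A minimal round-trip sketch, illustrative only and assuming AES_ENC_PREKEYED and AES_DEC_PREKEYED are both enabled in aes.h:

/* Illustrative sketch -- not part of aes.cpp.  Drives aes_cbc_encrypt() and
   aes_cbc_decrypt() with a single key schedule; the caller handles padding. */
#include <string.h>     /* memset */
#include <stdlib.h>     /* EXIT_SUCCESS */
#include "aes.h"

int cbc_roundtrip_example( const uint8_t key[16],
                           const uint8_t *plain, uint8_t *cipher,
                           uint8_t *decrypted, int32_t n_block )
{
    uint8_t iv[N_BLOCK] = { 0 };    /* use a fresh, unpredictable IV in real use */
    aes_context ctx[1];

    if( aes_set_key( key, 16, ctx ) != 0 )
        return -1;

    if( aes_cbc_encrypt( plain, cipher, n_block, iv, ctx ) != EXIT_SUCCESS )
        return -1;

    memset( iv, 0, N_BLOCK );       /* restore the starting IV before decrypting */
    return ( aes_cbc_decrypt( cipher, decrypted, n_block, iv, ctx ) == EXIT_SUCCESS ) ? 0 : -1;
}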