Espotel / LoRaWAN_Semtech_stack (Mbed 2, deprecated)

Dependencies: SX1272lib, mbed

aes.cpp

00001 /*
00002  ---------------------------------------------------------------------------
00003  Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
00004  LICENSE TERMS
00005  The redistribution and use of this software (with or without changes)
00006  is allowed without the payment of fees or royalties provided that:
00007   1. source code distributions include the above copyright notice, this
00008      list of conditions and the following disclaimer;
00009   2. binary distributions include the above copyright notice, this list
00010      of conditions and the following disclaimer in their documentation;
00011   3. the name of the copyright holder is not used to endorse products
00012      built using this software without specific written permission.
00013  DISCLAIMER
00014  This software is provided 'as is' with no explicit or implied warranties
00015  in respect of its properties, including, but not limited to, correctness
00016  and/or fitness for purpose.
00017  ---------------------------------------------------------------------------
00018  Issue 09/09/2006
00019  This is an AES implementation that uses only 8-bit byte operations on the
00020  cipher state (there are options to use 32-bit types if available).
00021  The combination of mix columns and byte substitution used here is based on
00022  that developed by Karl Malbrain. His contribution is acknowledged.
00023  */
00024 
00025 /* define if you have a fast memcpy function on your system */
00026 #if 0
00027 #  define HAVE_MEMCPY
00028 #  include <string.h>
00029 #  if defined( _MSC_VER )
00030 #    include <intrin.h>
00031 #    pragma intrinsic( memcpy )
00032 #  endif
00033 #endif
00034 
00035 
00036 #include <stdlib.h>
00037 #include <stdint.h>
00038 
00039 /* define if you have fast 32-bit types on your system */
00040 #if 1
00041 #  define HAVE_UINT_32T
00042 #endif
00043 
00044 /* define USE_TABLES to use the precomputed S-box and finite-field tables */
00045 #if 1
00046 #  define USE_TABLES
00047 #endif
00048 
00049 /*  On an Intel Core 2 Duo, VERSION_1 is faster */
00050 
00051 /* alternative versions (test for performance on your system) */
00052 #if 1
00053 #  define VERSION_1
00054 #endif
00055 
00056 #include "aes.h"
00057 
00058 //#if defined( HAVE_UINT_32T )
00059 //  typedef unsigned long uint32_t;
00060 //#endif
00061 
00062 /* functions for finite field multiplication in the AES Galois field    */
00063 
00064 #define WPOLY   0x011b
00065 #define BPOLY     0x1b
00066 #define DPOLY   0x008d
00067 
00068 #define f1(x)   (x)
00069 #define f2(x)   ((x << 1) ^ (((x >> 7) & 1) * WPOLY))
00070 #define f4(x)   ((x << 2) ^ (((x >> 6) & 1) * WPOLY) ^ (((x >> 6) & 2) * WPOLY))
00071 #define f8(x)   ((x << 3) ^ (((x >> 5) & 1) * WPOLY) ^ (((x >> 5) & 2) * WPOLY) \
00072                           ^ (((x >> 5) & 4) * WPOLY))
00073 #define d2(x)   (((x) >> 1) ^ ((x) & 1 ? DPOLY : 0))
00074 
00075 #define f3(x)   (f2(x) ^ x)
00076 #define f9(x)   (f8(x) ^ x)
00077 #define fb(x)   (f8(x) ^ f2(x) ^ x)
00078 #define fd(x)   (f8(x) ^ f4(x) ^ x)
00079 #define fe(x)   (f8(x) ^ f4(x) ^ f2(x))
00080 
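/*  Worked example of the field macros above (the xtime chain from FIPS-197,
    section 4.2.1); the macros are only ever applied to 8-bit values, and these
    are the bytes that end up in the gfm*_ tables built below:
        f2(0x57) = 0xae                      ({02}.{57}: top bit clear, no reduction)
        f2(0xae) = 0x15c ^ 0x11b = 0x47      ({02}.{ae}: top bit set, reduce by WPOLY)
        f3(0x57) = f2(0x57) ^ 0x57 = 0xf9    ({03}.{57})                             */
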
00081 #if defined( USE_TABLES )
00082 
00083 #define sb_data(w) {    /* S Box data values */                            \
00084     w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
00085     w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
00086     w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
00087     w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
00088     w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
00089     w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
00090     w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
00091     w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
00092     w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
00093     w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
00094     w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
00095     w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
00096     w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
00097     w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
00098     w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
00099     w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
00100     w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
00101     w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
00102     w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
00103     w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
00104     w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
00105     w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
00106     w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
00107     w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
00108     w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
00109     w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
00110     w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
00111     w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
00112     w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
00113     w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
00114     w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
00115     w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
00116 
00117 #define isb_data(w) {   /* inverse S Box data values */                    \
00118     w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38),\
00119     w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb),\
00120     w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87),\
00121     w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb),\
00122     w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d),\
00123     w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e),\
00124     w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2),\
00125     w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25),\
00126     w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16),\
00127     w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92),\
00128     w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda),\
00129     w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84),\
00130     w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a),\
00131     w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06),\
00132     w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02),\
00133     w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b),\
00134     w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea),\
00135     w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73),\
00136     w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85),\
00137     w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e),\
00138     w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89),\
00139     w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b),\
00140     w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20),\
00141     w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4),\
00142     w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31),\
00143     w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f),\
00144     w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d),\
00145     w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef),\
00146     w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0),\
00147     w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61),\
00148     w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26),\
00149     w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d) }
00150 
00151 #define mm_data(w) {    /* basic data for forming finite field tables */   \
00152     w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07),\
00153     w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f),\
00154     w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17),\
00155     w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f),\
00156     w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27),\
00157     w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f),\
00158     w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37),\
00159     w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f),\
00160     w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47),\
00161     w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f),\
00162     w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57),\
00163     w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f),\
00164     w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67),\
00165     w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f),\
00166     w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77),\
00167     w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f),\
00168     w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87),\
00169     w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f),\
00170     w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97),\
00171     w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f),\
00172     w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7),\
00173     w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf),\
00174     w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7),\
00175     w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf),\
00176     w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7),\
00177     w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf),\
00178     w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7),\
00179     w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), w(0xdd), w(0xde), w(0xdf),\
00180     w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7),\
00181     w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef),\
00182     w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7),\
00183     w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff) }
00184 
00185 static const uint8_t sbox[256]  =  sb_data(f1);
00186 
00187 #if defined( AES_DEC_PREKEYED )
00188 static const uint8_t isbox[256] = isb_data(f1);
00189 #endif
00190 
00191 static const uint8_t gfm2_sbox[256] = sb_data(f2);
00192 static const uint8_t gfm3_sbox[256] = sb_data(f3);
00193 
00194 #if defined( AES_DEC_PREKEYED )
00195 static const uint8_t gfmul_9[256] = mm_data(f9);
00196 static const uint8_t gfmul_b[256] = mm_data(fb);
00197 static const uint8_t gfmul_d[256] = mm_data(fd);
00198 static const uint8_t gfmul_e[256] = mm_data(fe);
00199 #endif
00200 
00201 #define s_box(x)     sbox[(x)]
00202 #if defined( AES_DEC_PREKEYED )
00203 #define is_box(x)    isbox[(x)]
00204 #endif
00205 #define gfm2_sb(x)   gfm2_sbox[(x)]
00206 #define gfm3_sb(x)   gfm3_sbox[(x)]
00207 #if defined( AES_DEC_PREKEYED )
00208 #define gfm_9(x)     gfmul_9[(x)]
00209 #define gfm_b(x)     gfmul_b[(x)]
00210 #define gfm_d(x)     gfmul_d[(x)]
00211 #define gfm_e(x)     gfmul_e[(x)]
00212 #endif
00213 #else
00214 
00215 /* this is the high bit of x right shifted by 1 */
00216 /* position. Since the starting polynomial has  */
00217 /* 9 bits (0x11b), this right shift keeps the   */
00218 /* values of all top bits within a byte         */
00219 
00220 static uint8_t hibit(const uint8_t x)
00221 {   uint8_t r = (uint8_t)((x >> 1) | (x >> 2));
00222 
00223     r |= (r >> 2);
00224     r |= (r >> 4);
00225     return (r + 1) >> 1;
00226 }
00227 
00228 /* return the inverse of the finite field element x */
00229 
00230 static uint8_t gf_inv(const uint8_t x)
00231 {   uint8_t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0;
00232 
00233     if(x < 2) 
00234         return x;
00235 
00236     for( ; ; )
00237     {
00238         if(n1)
00239             while(n2 >= n1)             /* divide polynomial p2 by p1    */
00240             {
00241                 n2 /= n1;               /* shift smaller polynomial left */ 
00242                 p2 ^= (p1 * n2) & 0xff; /* and remove from larger one    */
00243                 v2 ^= (v1 * n2);        /* shift accumulated value and   */ 
00244                 n2 = hibit(p2);         /* add into result               */
00245             }
00246         else
00247             return v1;
00248 
00249         if(n2)                          /* repeat with values swapped    */ 
00250             while(n1 >= n2)
00251             {
00252                 n1 /= n2; 
00253                 p1 ^= p2 * n1; 
00254                 v1 ^= v2 * n1; 
00255                 n1 = hibit(p1);
00256             }
00257         else
00258             return v2;
00259     }
00260 }
00261 
00262 /* The forward and inverse affine transformations used in the S-box */
00263 uint8_t fwd_affine(const uint8_t x)
00264 {   
00265 #if defined( HAVE_UINT_32T )
00266     uint32_t w = x;
00267     w ^= (w << 1) ^ (w << 2) ^ (w << 3) ^ (w << 4);
00268     return 0x63 ^ ((w ^ (w >> 8)) & 0xff);
00269 #else
00270     return 0x63 ^ x ^ (x << 1) ^ (x << 2) ^ (x << 3) ^ (x << 4) 
00271                     ^ (x >> 7) ^ (x >> 6) ^ (x >> 5) ^ (x >> 4);
00272 #endif
00273 }
00274 
00275 uint8_t inv_affine(const uint8_t x)
00276 {
00277 #if defined( HAVE_UINT_32T )
00278     uint32_t w = x;
00279     w = (w << 1) ^ (w << 3) ^ (w << 6);
00280     return 0x05 ^ ((w ^ (w >> 8)) & 0xff);
00281 #else
00282     return 0x05 ^ (x << 1) ^ (x << 3) ^ (x << 6) 
00283                 ^ (x >> 7) ^ (x >> 5) ^ (x >> 2);
00284 #endif
00285 }
00286 
00287 #define s_box(x)   fwd_affine(gf_inv(x))
00288 #define is_box(x)  gf_inv(inv_affine(x))
00289 #define gfm2_sb(x) f2(s_box(x))
00290 #define gfm3_sb(x) f3(s_box(x))
00291 #define gfm_9(x)   f9(x)
00292 #define gfm_b(x)   fb(x)
00293 #define gfm_d(x)   fd(x)
00294 #define gfm_e(x)   fe(x)
00295 
00296 #endif
00297 
00298 #if defined( HAVE_MEMCPY )
00299 #  define block_copy_nn(d, s, l)    memcpy(d, s, l)
00300 #  define block_copy(d, s)          memcpy(d, s, N_BLOCK)
00301 #else
00302 #  define block_copy_nn(d, s, l)    copy_block_nn(d, s, l)
00303 #  define block_copy(d, s)          copy_block(d, s)
00304 #endif
00305 
00306 static void copy_block( void *d, const void *s )
00307 {
00308 #if defined( HAVE_UINT_32T )
00309     ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ 0];
00310     ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ 1];
00311     ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ 2];
00312     ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ 3];
00313 #else
00314     ((uint8_t*)d)[ 0] = ((uint8_t*)s)[ 0];
00315     ((uint8_t*)d)[ 1] = ((uint8_t*)s)[ 1];
00316     ((uint8_t*)d)[ 2] = ((uint8_t*)s)[ 2];
00317     ((uint8_t*)d)[ 3] = ((uint8_t*)s)[ 3];
00318     ((uint8_t*)d)[ 4] = ((uint8_t*)s)[ 4];
00319     ((uint8_t*)d)[ 5] = ((uint8_t*)s)[ 5];
00320     ((uint8_t*)d)[ 6] = ((uint8_t*)s)[ 6];
00321     ((uint8_t*)d)[ 7] = ((uint8_t*)s)[ 7];
00322     ((uint8_t*)d)[ 8] = ((uint8_t*)s)[ 8];
00323     ((uint8_t*)d)[ 9] = ((uint8_t*)s)[ 9];
00324     ((uint8_t*)d)[10] = ((uint8_t*)s)[10];
00325     ((uint8_t*)d)[11] = ((uint8_t*)s)[11];
00326     ((uint8_t*)d)[12] = ((uint8_t*)s)[12];
00327     ((uint8_t*)d)[13] = ((uint8_t*)s)[13];
00328     ((uint8_t*)d)[14] = ((uint8_t*)s)[14];
00329     ((uint8_t*)d)[15] = ((uint8_t*)s)[15];
00330 #endif
00331 }
00332 
00333 static void copy_block_nn( uint8_t * d, const uint8_t *s, uint8_t nn )
00334 {
00335     while( nn-- )
00336         //*((uint8_t*)d)++ = *((uint8_t*)s)++;
00337         *d++ = *s++;
00338 }
00339 
00340 static void xor_block( void *d, const void *s )
00341 {
00342 #if defined( HAVE_UINT_32T )
00343     ((uint32_t*)d)[ 0] ^= ((uint32_t*)s)[ 0];
00344     ((uint32_t*)d)[ 1] ^= ((uint32_t*)s)[ 1];
00345     ((uint32_t*)d)[ 2] ^= ((uint32_t*)s)[ 2];
00346     ((uint32_t*)d)[ 3] ^= ((uint32_t*)s)[ 3];
00347 #else
00348     ((uint8_t*)d)[ 0] ^= ((uint8_t*)s)[ 0];
00349     ((uint8_t*)d)[ 1] ^= ((uint8_t*)s)[ 1];
00350     ((uint8_t*)d)[ 2] ^= ((uint8_t*)s)[ 2];
00351     ((uint8_t*)d)[ 3] ^= ((uint8_t*)s)[ 3];
00352     ((uint8_t*)d)[ 4] ^= ((uint8_t*)s)[ 4];
00353     ((uint8_t*)d)[ 5] ^= ((uint8_t*)s)[ 5];
00354     ((uint8_t*)d)[ 6] ^= ((uint8_t*)s)[ 6];
00355     ((uint8_t*)d)[ 7] ^= ((uint8_t*)s)[ 7];
00356     ((uint8_t*)d)[ 8] ^= ((uint8_t*)s)[ 8];
00357     ((uint8_t*)d)[ 9] ^= ((uint8_t*)s)[ 9];
00358     ((uint8_t*)d)[10] ^= ((uint8_t*)s)[10];
00359     ((uint8_t*)d)[11] ^= ((uint8_t*)s)[11];
00360     ((uint8_t*)d)[12] ^= ((uint8_t*)s)[12];
00361     ((uint8_t*)d)[13] ^= ((uint8_t*)s)[13];
00362     ((uint8_t*)d)[14] ^= ((uint8_t*)s)[14];
00363     ((uint8_t*)d)[15] ^= ((uint8_t*)s)[15];
00364 #endif
00365 }
00366 
00367 static void copy_and_key( void *d, const void *s, const void *k )
00368 {
00369 #if defined( HAVE_UINT_32T )
00370     ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ 0] ^ ((uint32_t*)k)[ 0];
00371     ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ 1] ^ ((uint32_t*)k)[ 1];
00372     ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ 2] ^ ((uint32_t*)k)[ 2];
00373     ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ 3] ^ ((uint32_t*)k)[ 3];
00374 #elif 1
00375     ((uint8_t*)d)[ 0] = ((uint8_t*)s)[ 0] ^ ((uint8_t*)k)[ 0];
00376     ((uint8_t*)d)[ 1] = ((uint8_t*)s)[ 1] ^ ((uint8_t*)k)[ 1];
00377     ((uint8_t*)d)[ 2] = ((uint8_t*)s)[ 2] ^ ((uint8_t*)k)[ 2];
00378     ((uint8_t*)d)[ 3] = ((uint8_t*)s)[ 3] ^ ((uint8_t*)k)[ 3];
00379     ((uint8_t*)d)[ 4] = ((uint8_t*)s)[ 4] ^ ((uint8_t*)k)[ 4];
00380     ((uint8_t*)d)[ 5] = ((uint8_t*)s)[ 5] ^ ((uint8_t*)k)[ 5];
00381     ((uint8_t*)d)[ 6] = ((uint8_t*)s)[ 6] ^ ((uint8_t*)k)[ 6];
00382     ((uint8_t*)d)[ 7] = ((uint8_t*)s)[ 7] ^ ((uint8_t*)k)[ 7];
00383     ((uint8_t*)d)[ 8] = ((uint8_t*)s)[ 8] ^ ((uint8_t*)k)[ 8];
00384     ((uint8_t*)d)[ 9] = ((uint8_t*)s)[ 9] ^ ((uint8_t*)k)[ 9];
00385     ((uint8_t*)d)[10] = ((uint8_t*)s)[10] ^ ((uint8_t*)k)[10];
00386     ((uint8_t*)d)[11] = ((uint8_t*)s)[11] ^ ((uint8_t*)k)[11];
00387     ((uint8_t*)d)[12] = ((uint8_t*)s)[12] ^ ((uint8_t*)k)[12];
00388     ((uint8_t*)d)[13] = ((uint8_t*)s)[13] ^ ((uint8_t*)k)[13];
00389     ((uint8_t*)d)[14] = ((uint8_t*)s)[14] ^ ((uint8_t*)k)[14];
00390     ((uint8_t*)d)[15] = ((uint8_t*)s)[15] ^ ((uint8_t*)k)[15];
00391 #else
00392     block_copy(d, s);
00393     xor_block(d, k);
00394 #endif
00395 }
00396 
00397 static void add_round_key( uint8_t d[N_BLOCK], const uint8_t k[N_BLOCK] )
00398 {
00399     xor_block(d, k);
00400 }
00401 
00402 static void shift_sub_rows( uint8_t st[N_BLOCK] )
00403 {   uint8_t tt;
00404 
00405     st[ 0] = s_box(st[ 0]); st[ 4] = s_box(st[ 4]);
00406     st[ 8] = s_box(st[ 8]); st[12] = s_box(st[12]);
00407 
00408     tt = st[1]; st[ 1] = s_box(st[ 5]); st[ 5] = s_box(st[ 9]);
00409     st[ 9] = s_box(st[13]); st[13] = s_box( tt );
00410 
00411     tt = st[2]; st[ 2] = s_box(st[10]); st[10] = s_box( tt );
00412     tt = st[6]; st[ 6] = s_box(st[14]); st[14] = s_box( tt );
00413 
00414     tt = st[15]; st[15] = s_box(st[11]); st[11] = s_box(st[ 7]);
00415     st[ 7] = s_box(st[ 3]); st[ 3] = s_box( tt );
00416 }
00417 
00418 #if defined( AES_DEC_PREKEYED )
00419 
00420 static void inv_shift_sub_rows( uint8_t st[N_BLOCK] )
00421 {   uint8_t tt;
00422 
00423     st[ 0] = is_box(st[ 0]); st[ 4] = is_box(st[ 4]);
00424     st[ 8] = is_box(st[ 8]); st[12] = is_box(st[12]);
00425 
00426     tt = st[13]; st[13] = is_box(st[9]); st[ 9] = is_box(st[5]);
00427     st[ 5] = is_box(st[1]); st[ 1] = is_box( tt );
00428 
00429     tt = st[2]; st[ 2] = is_box(st[10]); st[10] = is_box( tt );
00430     tt = st[6]; st[ 6] = is_box(st[14]); st[14] = is_box( tt );
00431 
00432     tt = st[3]; st[ 3] = is_box(st[ 7]); st[ 7] = is_box(st[11]);
00433     st[11] = is_box(st[15]); st[15] = is_box( tt );
00434 }
00435 
00436 #endif
00437 
00438 #if defined( VERSION_1 )
00439   static void mix_sub_columns( uint8_t dt[N_BLOCK] )
00440   { uint8_t st[N_BLOCK];
00441     block_copy(st, dt);
00442 #else
00443   static void mix_sub_columns( uint8_t dt[N_BLOCK], uint8_t st[N_BLOCK] )
00444   {
00445 #endif
00446     dt[ 0] = gfm2_sb(st[0]) ^ gfm3_sb(st[5]) ^ s_box(st[10]) ^ s_box(st[15]);
00447     dt[ 1] = s_box(st[0]) ^ gfm2_sb(st[5]) ^ gfm3_sb(st[10]) ^ s_box(st[15]);
00448     dt[ 2] = s_box(st[0]) ^ s_box(st[5]) ^ gfm2_sb(st[10]) ^ gfm3_sb(st[15]);
00449     dt[ 3] = gfm3_sb(st[0]) ^ s_box(st[5]) ^ s_box(st[10]) ^ gfm2_sb(st[15]);
00450 
00451     dt[ 4] = gfm2_sb(st[4]) ^ gfm3_sb(st[9]) ^ s_box(st[14]) ^ s_box(st[3]);
00452     dt[ 5] = s_box(st[4]) ^ gfm2_sb(st[9]) ^ gfm3_sb(st[14]) ^ s_box(st[3]);
00453     dt[ 6] = s_box(st[4]) ^ s_box(st[9]) ^ gfm2_sb(st[14]) ^ gfm3_sb(st[3]);
00454     dt[ 7] = gfm3_sb(st[4]) ^ s_box(st[9]) ^ s_box(st[14]) ^ gfm2_sb(st[3]);
00455 
00456     dt[ 8] = gfm2_sb(st[8]) ^ gfm3_sb(st[13]) ^ s_box(st[2]) ^ s_box(st[7]);
00457     dt[ 9] = s_box(st[8]) ^ gfm2_sb(st[13]) ^ gfm3_sb(st[2]) ^ s_box(st[7]);
00458     dt[10] = s_box(st[8]) ^ s_box(st[13]) ^ gfm2_sb(st[2]) ^ gfm3_sb(st[7]);
00459     dt[11] = gfm3_sb(st[8]) ^ s_box(st[13]) ^ s_box(st[2]) ^ gfm2_sb(st[7]);
00460 
00461     dt[12] = gfm2_sb(st[12]) ^ gfm3_sb(st[1]) ^ s_box(st[6]) ^ s_box(st[11]);
00462     dt[13] = s_box(st[12]) ^ gfm2_sb(st[1]) ^ gfm3_sb(st[6]) ^ s_box(st[11]);
00463     dt[14] = s_box(st[12]) ^ s_box(st[1]) ^ gfm2_sb(st[6]) ^ gfm3_sb(st[11]);
00464     dt[15] = gfm3_sb(st[12]) ^ s_box(st[1]) ^ s_box(st[6]) ^ gfm2_sb(st[11]);
00465   }
00466 
00467 #if defined( AES_DEC_PREKEYED )
00468 
00469 #if defined( VERSION_1 )
00470   static void inv_mix_sub_columns( uint8_t dt[N_BLOCK] )
00471   { uint8_t st[N_BLOCK];
00472     block_copy(st, dt);
00473 #else
00474   static void inv_mix_sub_columns( uint8_t dt[N_BLOCK], uint8_t st[N_BLOCK] )
00475   {
00476 #endif
00477     dt[ 0] = is_box(gfm_e(st[ 0]) ^ gfm_b(st[ 1]) ^ gfm_d(st[ 2]) ^ gfm_9(st[ 3]));
00478     dt[ 5] = is_box(gfm_9(st[ 0]) ^ gfm_e(st[ 1]) ^ gfm_b(st[ 2]) ^ gfm_d(st[ 3]));
00479     dt[10] = is_box(gfm_d(st[ 0]) ^ gfm_9(st[ 1]) ^ gfm_e(st[ 2]) ^ gfm_b(st[ 3]));
00480     dt[15] = is_box(gfm_b(st[ 0]) ^ gfm_d(st[ 1]) ^ gfm_9(st[ 2]) ^ gfm_e(st[ 3]));
00481 
00482     dt[ 4] = is_box(gfm_e(st[ 4]) ^ gfm_b(st[ 5]) ^ gfm_d(st[ 6]) ^ gfm_9(st[ 7]));
00483     dt[ 9] = is_box(gfm_9(st[ 4]) ^ gfm_e(st[ 5]) ^ gfm_b(st[ 6]) ^ gfm_d(st[ 7]));
00484     dt[14] = is_box(gfm_d(st[ 4]) ^ gfm_9(st[ 5]) ^ gfm_e(st[ 6]) ^ gfm_b(st[ 7]));
00485     dt[ 3] = is_box(gfm_b(st[ 4]) ^ gfm_d(st[ 5]) ^ gfm_9(st[ 6]) ^ gfm_e(st[ 7]));
00486 
00487     dt[ 8] = is_box(gfm_e(st[ 8]) ^ gfm_b(st[ 9]) ^ gfm_d(st[10]) ^ gfm_9(st[11]));
00488     dt[13] = is_box(gfm_9(st[ 8]) ^ gfm_e(st[ 9]) ^ gfm_b(st[10]) ^ gfm_d(st[11]));
00489     dt[ 2] = is_box(gfm_d(st[ 8]) ^ gfm_9(st[ 9]) ^ gfm_e(st[10]) ^ gfm_b(st[11]));
00490     dt[ 7] = is_box(gfm_b(st[ 8]) ^ gfm_d(st[ 9]) ^ gfm_9(st[10]) ^ gfm_e(st[11]));
00491 
00492     dt[12] = is_box(gfm_e(st[12]) ^ gfm_b(st[13]) ^ gfm_d(st[14]) ^ gfm_9(st[15]));
00493     dt[ 1] = is_box(gfm_9(st[12]) ^ gfm_e(st[13]) ^ gfm_b(st[14]) ^ gfm_d(st[15]));
00494     dt[ 6] = is_box(gfm_d(st[12]) ^ gfm_9(st[13]) ^ gfm_e(st[14]) ^ gfm_b(st[15]));
00495     dt[11] = is_box(gfm_b(st[12]) ^ gfm_d(st[13]) ^ gfm_9(st[14]) ^ gfm_e(st[15]));
00496   }
00497 
00498 #endif
00499 
00500 #if defined( AES_ENC_PREKEYED ) || defined( AES_DEC_PREKEYED )
00501 
00502 /*  Set the cipher key for the pre-keyed version */
00503 
00504 return_type aes_set_key( const uint8_t key[], length_type keylen, aes_context ctx[1] )
00505 {
00506     uint8_t cc, rc, hi;
00507 
00508     switch( keylen )
00509     {
00510     case 16:
00511     case 24:
00512     case 32:
00513         break;
00514     default: 
00515         ctx->rnd = 0; 
00516         return ( uint8_t )-1;
00517     }
00518     block_copy_nn(ctx->ksch, key, keylen);
00519     hi = (keylen + 28) << 2;
00520     ctx->rnd = (hi >> 4) - 1;
00521     for( cc = keylen, rc = 1; cc < hi; cc += 4 )
00522     {   uint8_t tt, t0, t1, t2, t3;
00523 
00524         t0 = ctx->ksch[cc - 4];
00525         t1 = ctx->ksch[cc - 3];
00526         t2 = ctx->ksch[cc - 2];
00527         t3 = ctx->ksch[cc - 1];
00528         if( cc % keylen == 0 )
00529         {
00530             tt = t0;
00531             t0 = s_box(t1) ^ rc;
00532             t1 = s_box(t2);
00533             t2 = s_box(t3);
00534             t3 = s_box(tt);
00535             rc = f2(rc);
00536         }
00537         else if( keylen > 24 && cc % keylen == 16 )
00538         {
00539             t0 = s_box(t0);
00540             t1 = s_box(t1);
00541             t2 = s_box(t2);
00542             t3 = s_box(t3);
00543         }
00544         tt = cc - keylen;
00545         ctx->ksch[cc + 0] = ctx->ksch[tt + 0] ^ t0;
00546         ctx->ksch[cc + 1] = ctx->ksch[tt + 1] ^ t1;
00547         ctx->ksch[cc + 2] = ctx->ksch[tt + 2] ^ t2;
00548         ctx->ksch[cc + 3] = ctx->ksch[tt + 3] ^ t3;
00549     }
00550     return 0;
00551 }
00552 
00553 #endif
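
/*  Illustrative sketch (disabled): key sizes accepted by aes_set_key() above.
    The example_* name is illustrative only; N_BLOCK and the AES_*_PREKEYED
    options are assumed to be configured in aes.h, with the key schedule in
    aes_context sized for the largest key length used.                         */
#if 0
static void example_key_sizes( void )
{
    aes_context ctx[1];
    uint8_t k[32] = { 0 };

    aes_set_key( k, 16, ctx );   /* AES-128: ctx->rnd == 10, 176-byte schedule  */
    aes_set_key( k, 24, ctx );   /* AES-192: ctx->rnd == 12, 208-byte schedule  */
    aes_set_key( k, 32, ctx );   /* AES-256: ctx->rnd == 14, 240-byte schedule  */
    aes_set_key( k, 20, ctx );   /* rejected: returns (uint8_t)-1, ctx->rnd = 0 */
}
#endif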
00554 
00555 #if defined( AES_ENC_PREKEYED )
00556 
00557 /*  Encrypt a single block of 16 bytes */
00558 
00559 return_type aes_encrypt( const uint8_t in[N_BLOCK], uint8_t  out[N_BLOCK], const aes_context ctx[1] )
00560 {
00561     if( ctx->rnd )
00562     {
00563         uint8_t s1[N_BLOCK], r;
00564         copy_and_key( s1, in, ctx->ksch );
00565 
00566         for( r = 1 ; r < ctx->rnd ; ++r )
00567 #if defined( VERSION_1 )
00568         {
00569             mix_sub_columns( s1 );
00570             add_round_key( s1, ctx->ksch + r * N_BLOCK);
00571         }
00572 #else
00573         {   uint8_t s2[N_BLOCK];
00574             mix_sub_columns( s2, s1 );
00575             copy_and_key( s1, s2, ctx->ksch + r * N_BLOCK);
00576         }
00577 #endif
00578         shift_sub_rows( s1 );
00579         copy_and_key( out, s1, ctx->ksch + r * N_BLOCK );
00580     }
00581     else
00582         return ( uint8_t )-1;
00583     return 0;
00584 }
00585 
00586 /* CBC encrypt a number of blocks (input and return an IV) */
00587 
00588 return_type aes_cbc_encrypt( const uint8_t *in, uint8_t *out,
00589                          int32_t n_block, uint8_t iv[N_BLOCK], const aes_context ctx[1] )
00590 {
00591 
00592     while(n_block--)
00593     {
00594         xor_block(iv, in);
00595         if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
00596             return EXIT_FAILURE;
00597         //memcpy(out, iv, N_BLOCK);
00598         block_copy(out, iv);
00599         in += N_BLOCK;
00600         out += N_BLOCK;
00601     }
00602     return EXIT_SUCCESS;
00603 }
00604 
00605 #endif
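
/*  Illustrative sketch (disabled): single-block and CBC encryption with the
    functions above. The key/plaintext pair is the AES-128 example from
    FIPS-197 Appendix C.1; N_BLOCK is assumed to be 16 as defined in aes.h,
    and the example_* name is illustrative only.                               */
#if 0
static void example_encrypt( void )
{
    static const uint8_t key[16] =
    {   0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f  };
    static const uint8_t pt[16] =           /* FIPS-197 Appendix C.1 plaintext  */
    {   0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
        0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff  };
    uint8_t ct[N_BLOCK], iv[N_BLOCK] = { 0 };
    uint8_t buf_in[2 * N_BLOCK] = { 0 }, buf_out[2 * N_BLOCK];
    aes_context ctx[1];

    if( aes_set_key( key, 16, ctx ) != 0 )
        return;

    /* one 16-byte block; expected ciphertext per FIPS-197 C.1:                */
    /* 69 c4 e0 d8 6a 7b 04 30 d8 cd b7 80 70 b4 c5 5a                         */
    aes_encrypt( pt, ct, ctx );

    /* CBC over two blocks: iv is the chaining state, is updated in place and  */
    /* ends up holding the last ciphertext block produced                      */
    aes_cbc_encrypt( buf_in, buf_out, 2, iv, ctx );
}
#endif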
00606 
00607 #if defined( AES_DEC_PREKEYED )
00608 
00609 /*  Decrypt a single block of 16 bytes */
00610 
00611 return_type aes_decrypt( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK], const aes_context ctx[1] )
00612 {
00613     if( ctx->rnd )
00614     {
00615         uint8_t s1[N_BLOCK], r;
00616         copy_and_key( s1, in, ctx->ksch + ctx->rnd * N_BLOCK );
00617         inv_shift_sub_rows( s1 );
00618 
00619         for( r = ctx->rnd ; --r ; )
00620 #if defined( VERSION_1 )
00621         {
00622             add_round_key( s1, ctx->ksch + r * N_BLOCK );
00623             inv_mix_sub_columns( s1 );
00624         }
00625 #else
00626         {   uint8_t s2[N_BLOCK];
00627             copy_and_key( s2, s1, ctx->ksch + r * N_BLOCK );
00628             inv_mix_sub_columns( s1, s2 );
00629         }
00630 #endif
00631         copy_and_key( out, s1, ctx->ksch );
00632     }
00633     else
00634         return -1;
00635     return 0;
00636 }
00637 
00638 /* CBC decrypt a number of blocks (input and return an IV) */
00639 
00640 return_type aes_cbc_decrypt( const uint8_t *in, uint8_t *out,
00641                          int32_t n_block, uint8_t iv[N_BLOCK], const aes_context ctx[1] )
00642 {   
00643     while(n_block--)
00644     {   uint8_t tmp[N_BLOCK];
00645         
00646         //memcpy(tmp, in, N_BLOCK);
00647         block_copy(tmp, in);
00648         if(aes_decrypt(in, out, ctx) != EXIT_SUCCESS)
00649             return EXIT_FAILURE;
00650         xor_block(out, iv);
00651         //memcpy(iv, tmp, N_BLOCK);
00652         block_copy(iv, tmp);
00653         in += N_BLOCK;
00654         out += N_BLOCK;
00655     }
00656     return EXIT_SUCCESS;
00657 }
00658 
00659 #endif
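
/*  Illustrative sketch (disabled): CBC round trip with the functions above.
    Both aes_cbc_encrypt() and aes_cbc_decrypt() update iv[] in place (it ends
    up holding the last ciphertext block), so the caller must keep a copy of
    the original IV in order to decrypt. In-place operation (in == out) works
    with both functions. Assumes N_BLOCK == 16 and that both AES_ENC_PREKEYED
    and AES_DEC_PREKEYED are enabled; the example_* name is illustrative only. */
#if 0
static return_type example_cbc_round_trip( const aes_context ctx[1],
                                           const uint8_t iv_orig[N_BLOCK],
                                           uint8_t *buf, int32_t n_block )
{
    uint8_t iv[N_BLOCK];

    block_copy( iv, iv_orig );                      /* encrypt with a copy      */
    if( aes_cbc_encrypt( buf, buf, n_block, iv, ctx ) != EXIT_SUCCESS )
        return EXIT_FAILURE;

    block_copy( iv, iv_orig );                      /* restore the IV to decrypt */
    return aes_cbc_decrypt( buf, buf, n_block, iv, ctx );
}
#endif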
00660 
00661 #if defined( AES_ENC_128_OTFK )
00662 
00663 /*  The 'on the fly' encryption key update for 128 bit keys */
00664 
00665 static void update_encrypt_key_128( uint8_t k[N_BLOCK], uint8_t *rc )
00666 {   uint8_t cc;
00667 
00668     k[0] ^= s_box(k[13]) ^ *rc;
00669     k[1] ^= s_box(k[14]);
00670     k[2] ^= s_box(k[15]);
00671     k[3] ^= s_box(k[12]);
00672     *rc = f2( *rc );
00673 
00674     for(cc = 4; cc < 16; cc += 4 )
00675     {
00676         k[cc + 0] ^= k[cc - 4];
00677         k[cc + 1] ^= k[cc - 3];
00678         k[cc + 2] ^= k[cc - 2];
00679         k[cc + 3] ^= k[cc - 1];
00680     }
00681 }
00682 
00683 /*  Encrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
00684 
00685 void aes_encrypt_128( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00686                      const uint8_t key[N_BLOCK], uint8_t o_key[N_BLOCK] )
00687 {   uint8_t s1[N_BLOCK], r, rc = 1;
00688 
00689     if(o_key != key)
00690         block_copy( o_key, key );
00691     copy_and_key( s1, in, o_key );
00692 
00693     for( r = 1 ; r < 10 ; ++r )
00694 #if defined( VERSION_1 )
00695     {
00696         mix_sub_columns( s1 );
00697         update_encrypt_key_128( o_key, &rc );
00698         add_round_key( s1, o_key );
00699     }
00700 #else
00701     {   uint8_t s2[N_BLOCK];
00702         mix_sub_columns( s2, s1 );
00703         update_encrypt_key_128( o_key, &rc );
00704         copy_and_key( s1, s2, o_key );
00705     }
00706 #endif
00707 
00708     shift_sub_rows( s1 );
00709     update_encrypt_key_128( o_key, &rc );
00710     copy_and_key( out, s1, o_key );
00711 }
00712 
00713 #endif
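
/*  Illustrative sketch (disabled): 'on the fly' 128-bit encryption. o_key is
    working storage for the key schedule: if it is a separate buffer the cipher
    key is left untouched (passing o_key == key overwrites the key in place),
    and after the call o_key holds the round-10 key. Assumes N_BLOCK == 16;
    the example_* name is illustrative only.                                   */
#if 0
static void example_encrypt_128_otfk( const uint8_t key[N_BLOCK],
                                      const uint8_t in[N_BLOCK],
                                      uint8_t out[N_BLOCK],
                                      uint8_t last_round_key[N_BLOCK] )
{
    aes_encrypt_128( in, out, key, last_round_key );
}
#endif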
00714 
00715 #if defined( AES_DEC_128_OTFK )
00716 
00717 /*  The 'on the fly' decryption key update for 128 bit keys */

00718 
00719 static void update_decrypt_key_128( uint8_t k[N_BLOCK], uint8_t *rc )
00720 {   uint8_t cc;
00721 
00722     for( cc = 12; cc > 0; cc -= 4 )
00723     {
00724         k[cc + 0] ^= k[cc - 4];
00725         k[cc + 1] ^= k[cc - 3];
00726         k[cc + 2] ^= k[cc - 2];
00727         k[cc + 3] ^= k[cc - 1];
00728     }
00729     *rc = d2(*rc);
00730     k[0] ^= s_box(k[13]) ^ *rc;
00731     k[1] ^= s_box(k[14]);
00732     k[2] ^= s_box(k[15]);
00733     k[3] ^= s_box(k[12]);
00734 }
00735 
00736 /*  Decrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
00737 
00738 void aes_decrypt_128( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00739                       const uint8_t key[N_BLOCK], uint8_t o_key[N_BLOCK] )
00740 {
00741     uint8_t s1[N_BLOCK], r, rc = 0x6c;
00742     if(o_key != key)
00743         block_copy( o_key, key );
00744 
00745     copy_and_key( s1, in, o_key );
00746     inv_shift_sub_rows( s1 );
00747 
00748     for( r = 10 ; --r ; )
00749 #if defined( VERSION_1 )
00750     {
00751         update_decrypt_key_128( o_key, &rc );
00752         add_round_key( s1, o_key );
00753         inv_mix_sub_columns( s1 );
00754     }
00755 #else
00756     {   uint8_t s2[N_BLOCK];
00757         update_decrypt_key_128( o_key, &rc );
00758         copy_and_key( s2, s1, o_key );
00759         inv_mix_sub_columns( s1, s2 );
00760     }
00761 #endif
00762     update_decrypt_key_128( o_key, &rc );
00763     copy_and_key( out, s1, o_key );
00764 }
00765 
00766 #endif
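
/*  Illustrative sketch (disabled): round trip with the 'on the fly' 128-bit
    pair. As the code above reads, aes_decrypt_128() applies its key argument
    as the first (final-round) whitening value and then steps the schedule
    backwards, so it must be given the last round key, i.e. the o_key left
    behind by aes_encrypt_128(), not the original cipher key. The example_*
    name is illustrative only.                                                 */
#if 0
static void example_otfk_128_round_trip( const uint8_t key[N_BLOCK],
                                         const uint8_t pt[N_BLOCK] )
{
    uint8_t ct[N_BLOCK], rt[N_BLOCK];
    uint8_t o_key[N_BLOCK], d_key[N_BLOCK];

    aes_encrypt_128( pt, ct, key, o_key );     /* o_key <- round-10 key         */
    aes_decrypt_128( ct, rt, o_key, d_key );   /* rt == pt, d_key <- round-0 key */
}
#endif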
00767 
00768 #if defined( AES_ENC_256_OTFK )
00769 
00770 /*  The 'on the fly' encryption key update for 256 bit keys */
00771 
00772 static void update_encrypt_key_256( uint8_t k[2 * N_BLOCK], uint8_t *rc )
00773 {   uint8_t cc;
00774 
00775     k[0] ^= s_box(k[29]) ^ *rc;
00776     k[1] ^= s_box(k[30]);
00777     k[2] ^= s_box(k[31]);
00778     k[3] ^= s_box(k[28]);
00779     *rc = f2( *rc );
00780 
00781     for(cc = 4; cc < 16; cc += 4)
00782     {
00783         k[cc + 0] ^= k[cc - 4];
00784         k[cc + 1] ^= k[cc - 3];
00785         k[cc + 2] ^= k[cc - 2];
00786         k[cc + 3] ^= k[cc - 1];
00787     }
00788 
00789     k[16] ^= s_box(k[12]);
00790     k[17] ^= s_box(k[13]);
00791     k[18] ^= s_box(k[14]);
00792     k[19] ^= s_box(k[15]);
00793 
00794     for( cc = 20; cc < 32; cc += 4 )
00795     {
00796         k[cc + 0] ^= k[cc - 4];
00797         k[cc + 1] ^= k[cc - 3];
00798         k[cc + 2] ^= k[cc - 2];
00799         k[cc + 3] ^= k[cc - 1];
00800     }
00801 }
00802 
00803 /*  Encrypt a single block of 16 bytes with 'on the fly' 256 bit keying */
00804 
00805 void aes_encrypt_256( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00806                       const uint8_t key[2 * N_BLOCK], uint8_t o_key[2 * N_BLOCK] )
00807 {
00808     uint8_t s1[N_BLOCK], r, rc = 1;
00809     if(o_key != key)
00810     {
00811         block_copy( o_key, key );
00812         block_copy( o_key + 16, key + 16 );
00813     }
00814     copy_and_key( s1, in, o_key );
00815 
00816     for( r = 1 ; r < 14 ; ++r )
00817 #if defined( VERSION_1 )
00818     {
00819         mix_sub_columns(s1);
00820         if( r & 1 )
00821             add_round_key( s1, o_key + 16 );
00822         else
00823         {
00824             update_encrypt_key_256( o_key, &rc );
00825             add_round_key( s1, o_key );
00826         }
00827     }
00828 #else
00829     {   uint8_t s2[N_BLOCK];
00830         mix_sub_columns( s2, s1 );
00831         if( r & 1 )
00832             copy_and_key( s1, s2, o_key + 16 );
00833         else
00834         {
00835             update_encrypt_key_256( o_key, &rc );
00836             copy_and_key( s1, s2, o_key );
00837         }
00838     }
00839 #endif
00840 
00841     shift_sub_rows( s1 );
00842     update_encrypt_key_256( o_key, &rc );
00843     copy_and_key( out, s1, o_key );
00844 }
00845 
00846 #endif
00847 
00848 #if defined( AES_DEC_256_OTFK )
00849 
00850 /*  The 'on the fly' decryption key update for 256 bit keys */
00851 
00852 static void update_decrypt_key_256( uint8_t k[2 * N_BLOCK], uint8_t *rc )
00853 {   uint8_t cc;
00854 
00855     for(cc = 28; cc > 16; cc -= 4)
00856     {
00857         k[cc + 0] ^= k[cc - 4];
00858         k[cc + 1] ^= k[cc - 3];
00859         k[cc + 2] ^= k[cc - 2];
00860         k[cc + 3] ^= k[cc - 1];
00861     }
00862 
00863     k[16] ^= s_box(k[12]);
00864     k[17] ^= s_box(k[13]);
00865     k[18] ^= s_box(k[14]);
00866     k[19] ^= s_box(k[15]);
00867 
00868     for(cc = 12; cc > 0; cc -= 4)
00869     {
00870         k[cc + 0] ^= k[cc - 4];
00871         k[cc + 1] ^= k[cc - 3];
00872         k[cc + 2] ^= k[cc - 2];
00873         k[cc + 3] ^= k[cc - 1];
00874     }
00875 
00876     *rc = d2(*rc);
00877     k[0] ^= s_box(k[29]) ^ *rc;
00878     k[1] ^= s_box(k[30]);
00879     k[2] ^= s_box(k[31]);
00880     k[3] ^= s_box(k[28]);
00881 }
00882 
00883 /*  Decrypt a single block of 16 bytes with 'on the fly'
00884     256 bit keying
00885 */
00886 void aes_decrypt_256( const uint8_t in[N_BLOCK], uint8_t out[N_BLOCK],
00887                       const uint8_t key[2 * N_BLOCK], uint8_t o_key[2 * N_BLOCK] )
00888 {
00889     uint8_t s1[N_BLOCK], r, rc = 0x80;
00890 
00891     if(o_key != key)
00892     {
00893         block_copy( o_key, key );
00894         block_copy( o_key + 16, key + 16 );
00895     }
00896 
00897     copy_and_key( s1, in, o_key );
00898     inv_shift_sub_rows( s1 );
00899 
00900     for( r = 14 ; --r ; )
00901 #if defined( VERSION_1 )
00902     {
00903         if( ( r & 1 ) )
00904         {
00905             update_decrypt_key_256( o_key, &rc );
00906             add_round_key( s1, o_key + 16 );
00907         }
00908         else
00909             add_round_key( s1, o_key );
00910         inv_mix_sub_columns( s1 );
00911     }
00912 #else
00913     {   uint8_t s2[N_BLOCK];
00914         if( ( r & 1 ) )
00915         {
00916             update_decrypt_key_256( o_key, &rc );
00917             copy_and_key( s2, s1, o_key + 16 );
00918         }
00919         else
00920             copy_and_key( s2, s1, o_key );
00921         inv_mix_sub_columns( s1, s2 );
00922     }
00923 #endif
00924     copy_and_key( out, s1, o_key );
00925 }
00926 
00927 #endif
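
/*  Illustrative sketch (disabled): the 256-bit 'on the fly' pair mirrors the
    128-bit one but keeps two round keys at a time, so key and o_key are
    2 * N_BLOCK bytes. As the code reads, the o_key left behind by
    aes_encrypt_256() is exactly what aes_decrypt_256() expects as its key
    argument. The example_* name is illustrative only.                         */
#if 0
static void example_otfk_256_round_trip( const uint8_t key[2 * N_BLOCK],
                                         const uint8_t pt[N_BLOCK] )
{
    uint8_t ct[N_BLOCK], rt[N_BLOCK];
    uint8_t o_key[2 * N_BLOCK], d_key[2 * N_BLOCK];

    aes_encrypt_256( pt, ct, key, o_key );     /* o_key <- last two round keys  */
    aes_decrypt_256( ct, rt, o_key, d_key );   /* recovers pt into rt           */
}
#endif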