Added CMSIS5 DSP and NN folders. Needs some work.


arm_softmax_q7.c

/*
 * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_softmax_q7.c
 * Description:  Q7 softmax function
 *
 * $Date:        17. January 2018
 * $Revision:    V.1.0.0
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_math.h"
#include "arm_nnfunctions.h"

/**
 *  @ingroup groupNN
 */

/**
 * @addtogroup Softmax
 * @{
 */

  /**
   * @brief Q7 softmax function
   * @param[in]       vec_in      pointer to input vector
   * @param[in]       dim_vec     input vector dimension
   * @param[out]      p_out       pointer to output vector
   * @return none.
   *
   * @details
   *
   *  Here, instead of the typical softmax based on the natural
   *  logarithm (base e), we use a base-2 softmax, i.e.:
   *
   *  y_i = 2^(x_i) / sum(2^x_j)
   *
   *  The output values differ from those of the base-e softmax,
   *  but mathematically the gradient is the same up to a log(2)
   *  scaling factor.
   *
   */
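  /* Worked example (input values chosen for illustration, not from the
   * library): for x = {3, 1, 0}, sum(2^x_j) = 8 + 2 + 1 = 11, so
   * y = {8/11, 2/11, 1/11} ~= {0.727, 0.182, 0.091}. In the Q7 output
   * of the function below (where 128 ~ 100% confidence), y_0 becomes
   * ((1 << (3 + 20)) / 11) >> 13 = 93, i.e. 93/128 ~= 0.727.
   */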

void arm_softmax_q7(const q7_t * vec_in, const uint16_t dim_vec, q7_t * p_out)
{
    q31_t     sum;
    int16_t   i;
    q15_t     min, max;
    max = -257;
    min = 257;
    for (i = 0; i < dim_vec; i++)
    {
        if (vec_in[i] > max)
        {
            max = vec_in[i];
        }
        if (vec_in[i] < min)
        {
            min = vec_in[i];
        }
    }

    /* We ignore really small values:
     * they will be 0 after shrinking to q7_t anyway.
     */
    if (max - min > 8)
    {
        min = max - 8;
    }

    sum = 0;

    for (i = 0; i < dim_vec; i++)
    {
        /* skip inputs below min: a negative shift amount is undefined
         * behaviour, and their contribution is negligible anyway */
        if (vec_in[i] >= min)
        {
            sum += 0x1 << (vec_in[i] - min);
        }
    }

    for (i = 0; i < dim_vec; i++)
    {
        if (vec_in[i] >= min)
        {
            /* we leave 7-bit dynamic range, so that 128 -> 100% confidence */
            p_out[i] = (q7_t) __SSAT(((0x1 << (vec_in[i] - min + 20)) / sum) >> 13, 8);
        }
        else
        {
            p_out[i] = 0;
        }
    }
}

/**
 * @} end of Softmax group
 */
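Below is a minimal, hypothetical usage sketch of the function above. The 4-class input values are made up for illustration; compiling it assumes the CMSIS-NN headers are on the include path and that the toolchain provides the __SSAT intrinsic (e.g. a Cortex-M target, with printf retargeted accordingly):

#include "arm_nnfunctions.h"
#include <stdio.h>

int main(void)
{
    /* hypothetical Q7 activations from a 4-class output layer */
    const q7_t logits[4] = { 5, 2, -3, 0 };
    q7_t probs[4];

    arm_softmax_q7(logits, 4, probs);

    /* outputs use a 7-bit dynamic range (128 -> 100% confidence),
     * so dividing by 128.0f recovers an approximate probability */
    for (int i = 0; i < 4; i++)
    {
        printf("class %d: %.3f\n", i, probs[i] / 128.0f);
    }

    return 0;
}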