source: S-port/trunk/Drivers/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c

Last change on this file was r1, checked in by AlexLir, 3 years ago
File size: 3.2 KB
/*
 * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_softmax_q7.c
 * Description:  Q7 softmax function
 *
 * $Date:        20. February 2018
 * $Revision:    V.1.0.0
 *
 * Target Processor: Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_math.h"
#include "arm_nnfunctions.h"

/**
 * @ingroup groupNN
 */

/**
 * @addtogroup Softmax
 * @{
 */

/**
 * @brief Q7 softmax function
 * @param[in]  vec_in   pointer to input vector
 * @param[in]  dim_vec  input vector dimension
 * @param[out] p_out    pointer to output vector
 * @return none.
 *
 * @details
 *
 * Instead of the typical natural-logarithm (e-based) softmax, this
 * function uses a 2-based softmax:
 *
 * y_i = 2^(x_i) / sum(2^x_j)
 *
 * The relative outputs therefore differ from the e-based version,
 * but mathematically the gradient is the same up to a log(2)
 * scaling factor.
 *
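 * For example (input values chosen purely for illustration), the vector
 * {0, 1, 2} yields {2^0, 2^1, 2^2} / (2^0 + 2^1 + 2^2) = {1, 2, 4} / 7,
 * i.e. roughly {0.14, 0.29, 0.57}, whereas an e-based softmax would give
 * roughly {0.09, 0.24, 0.67}; the ranking of the outputs is unchanged.
 *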
 */

void arm_softmax_q7(const q7_t * vec_in, const uint16_t dim_vec, q7_t * p_out)
{
    q31_t sum;
    int16_t i;
    uint8_t shift;
    q15_t base;
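    /* Start below the smallest possible q7_t value (-128) so that the
       first element always becomes the running maximum. */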
    base = -257;

    /* We first search for the maximum */
    for (i = 0; i < dim_vec; i++)
    {
        if (vec_in[i] > base)
        {
            base = vec_in[i];
        }
    }

    /*
     * The base is set to max - 8, so that inputs much smaller than the
     * maximum are ignored; they would round to 0 after shrinking to
     * q7_t anyway.
     */
    base = base - 8;

    sum = 0;

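    /* Accumulate the denominator sum(2^(vec_in[i] - base)) with left
       shifts; only elements above the base contribute. */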
    for (i = 0; i < dim_vec; i++)
    {
        if (vec_in[i] > base)
        {
            shift = (uint8_t)__USAT(vec_in[i] - base, 5);
            sum += 0x1 << shift;
        }
    }

    /* This is effectively (0x1 << 20) / sum */
    int output_base = 0x100000 / sum;

    /*
     * The final confidence will be output_base >> (13 - (vec_in[i] - base)),
     * so 128 (0x1 << 7) -> 100% confidence when sum = 0x1 << 8,
     * output_base = 0x1 << 12 and vec_in[i] - base = 8.
     */
    for (i = 0; i < dim_vec; i++)
    {
        if (vec_in[i] > base)
        {
            /* Here the minimum value of 13 + base - vec_in[i] will be 5 */
            shift = (uint8_t)__USAT(13 + base - vec_in[i], 5);
            p_out[i] = (q7_t)__SSAT((output_base >> shift), 8);
        } else {
            p_out[i] = 0;
        }
    }
}

/**
 * @} end of Softmax group
 */
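
A minimal calling sketch follows. The function name classify_example, the
10-class size, and the score values are illustrative choices, not part of
CMSIS-NN; only the headers and arm_softmax_q7 itself come from the library.

#include "arm_math.h"
#include "arm_nnfunctions.h"

/* Hypothetical 10-class example: raw Q7 scores in, Q7 confidences out. */
static const q7_t scores[10] = { -40, 12, 7, 0, 25, -3, 18, 5, -9, 2 };
static q7_t confidences[10];

void classify_example(void)
{
    arm_softmax_q7(scores, 10, confidences);
    /* confidences[i] now holds the 2-based softmax of scores[i] in Q7,
       with 127 as the saturated maximum (close to 100% confidence). */
}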