/*
 * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_softmax_q15.c
 * Description:  Q15 softmax function
 *
 * $Date:        20. February 2018
 * $Revision:    V.1.0.0
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_math.h"
#include "arm_nnfunctions.h"

/**
 * @ingroup groupNN
 */

/**
 * @addtogroup Softmax
 * @{
 */

43 | /**
|
---|
44 | * @brief Q15 softmax function
|
---|
45 | * @param[in] vec_in pointer to input vector
|
---|
46 | * @param[in] dim_vec input vector dimention
|
---|
47 | * @param[out] p_out pointer to output vector
|
---|
48 | * @return none.
|
---|
49 | *
|
---|
50 | * @details
|
---|
51 | *
|
---|
52 | * Here, instead of typical e based softmax, we use
|
---|
53 | * 2-based softmax, i.e.,:
|
---|
54 | *
|
---|
55 | * y_i = 2^(x_i) / sum(2^x_j)
|
---|
56 | *
|
---|
57 | * The relative output will be different here.
|
---|
58 | * But mathematically, the gradient will be the same
|
---|
59 | * with a log(2) scaling factor.
|
---|
60 | *
|
---|
61 | */
|
---|
62 |
|
---|
63 | void arm_softmax_q15(const q15_t * vec_in, const uint16_t dim_vec, q15_t * p_out)
|
---|
64 | {
|
---|
65 | q31_t sum;
|
---|
66 | int16_t i;
|
---|
67 | uint8_t shift;
|
---|
68 | q31_t base;
|
---|
69 | base = -1 * 0x100000;
|
---|
70 | for (i = 0; i < dim_vec; i++)
|
---|
71 | {
|
---|
72 | if (vec_in[i] > base)
|
---|
73 | {
|
---|
74 | base = vec_in[i];
|
---|
75 | }
|
---|
76 | }
|
---|
77 |
|
---|
78 | /* we ignore really small values
|
---|
79 | * anyway, they will be 0 after shrinking
|
---|
80 | * to q15_t
|
---|
81 | */
|
---|
82 | base = base - 16;
|
---|
83 |
|
---|
84 | sum = 0;
|
---|
85 |
|
---|
86 | for (i = 0; i < dim_vec; i++)
|
---|
87 | {
|
---|
88 | if (vec_in[i] > base)
|
---|
89 | {
|
---|
90 | shift = (uint8_t)__USAT(vec_in[i] - base, 5);
|
---|
91 | sum += 0x1 << shift;
|
---|
92 | }
|
---|
93 | }
|
---|
94 |
|
---|
95 | /* This is effectively (0x1 << 32) / sum */
|
---|
96 | int64_t div_base = 0x100000000LL;
|
---|
97 | int output_base = (int32_t)(div_base / sum);
|
---|
98 |
|
---|
99 | /* Final confidence will be output_base >> ( 17 - (vec_in[i] - base) )
|
---|
100 | * so 32768 (0x1<<15) -> 100% confidence when sum = 0x1 << 16, output_base = 0x1 << 16
|
---|
101 | * and vec_in[i]-base = 16
|
---|
102 | */
|
---|
103 | for (i = 0; i < dim_vec; i++)
|
---|
104 | {
|
---|
105 | if (vec_in[i] > base)
|
---|
106 | {
|
---|
107 | /* Here minimum value of 17+base-vec[i] will be 1 */
|
---|
108 | shift = (uint8_t)__USAT(17+base-vec_in[i], 5);
|
---|
109 | p_out[i] = (q15_t) __SSAT((output_base >> shift), 16);
|
---|
110 | } else
|
---|
111 | {
|
---|
112 | p_out[i] = 0;
|
---|
113 | }
|
---|
114 | }
|
---|
115 |
|
---|
116 | }
|
---|
117 |
|
---|
118 | /**
|
---|
119 | * @} end of Softmax group
|
---|
120 | */
|
---|