luisomoreau commited on
Commit
b7b614e
·
1 Parent(s): 3b40a75

Upload 1028 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. ei-cpp-export/CMakeLists.txt +14 -0
  3. ei-cpp-export/README.txt +27 -0
  4. ei-cpp-export/edge-impulse-sdk/.gitignore +9 -0
  5. ei-cpp-export/edge-impulse-sdk/.mbedignore +12 -0
  6. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cachel1_armv7.h +411 -0
  7. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armcc.h +887 -0
  8. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang.h +1489 -0
  9. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang_ltm.h +1914 -0
  10. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h +283 -0
  11. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_gcc.h +2215 -0
  12. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_iccarm.h +971 -0
  13. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_version.h +39 -0
  14. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_armv81mml.h +0 -0
  15. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mbl.h +0 -0
  16. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mml.h +0 -0
  17. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm0.h +952 -0
  18. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm0plus.h +1087 -0
  19. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm1.h +979 -0
  20. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm23.h +0 -0
  21. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm3.h +0 -0
  22. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm33.h +0 -0
  23. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm35p.h +0 -0
  24. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm4.h +0 -0
  25. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm55.h +0 -0
  26. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm7.h +0 -0
  27. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_sc000.h +1030 -0
  28. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_sc300.h +0 -0
  29. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv7.h +275 -0
  30. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv8.h +352 -0
  31. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/pmu_armv8.h +337 -0
  32. ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/tz_context.h +70 -0
  33. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h +529 -0
  34. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables_f16.h +132 -0
  35. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h +86 -0
  36. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs_f16.h +77 -0
  37. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h +753 -0
  38. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h +258 -0
  39. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_f16.h +58 -0
  40. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h +240 -0
  41. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h +591 -0
  42. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h +155 -0
  43. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables.h +230 -0
  44. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables_f16.h +108 -0
  45. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_sorting.h +200 -0
  46. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h +58 -0
  47. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_filtering.h +0 -0
  48. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h +372 -0
  49. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h +314 -0
  50. ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h +763 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ ei-cpp-export/edge-impulse-sdk/third_party/arc_mli_package/bin/emsdp_em11d_em9d_dfss/release/libmli.a filter=lfs diff=lfs merge=lfs -text
ei-cpp-export/CMakeLists.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cmake_minimum_required(VERSION 3.13.1)
2
+
3
+ if(NOT TARGET app)
4
+ message(FATAL_ERROR "Please create a target named 'app' (ex: add_executable(app)) before adding this file")
5
+ endif()
6
+
7
+ include(edge-impulse-sdk/cmake/utils.cmake)
8
+ add_subdirectory(edge-impulse-sdk/cmake/zephyr)
9
+
10
+ RECURSIVE_FIND_FILE_APPEND(MODEL_SOURCE "tflite-model" "*.cpp")
11
+ target_include_directories(app PRIVATE .)
12
+
13
+ # add all sources to the project
14
+ target_sources(app PRIVATE ${MODEL_SOURCE})
ei-cpp-export/README.txt ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Edge Impulse library for Face detection - FOMO - bigger dataset
3
+
4
+ This is a C++ library that lets you run the impulse for "Face detection - FOMO - bigger dataset" (http://localhost:4800/studio/116394) on any device. It consists of the Edge Impulse inferencing SDK - with implementations of both processing and learning blocks - and your model. You will need to include this library in your project to run your impulse locally.
5
+
6
+ ## Getting Started
7
+
8
+ Please refer to the following documentation to learn how to use this library:
9
+
10
+ * Deploy your model as a C++ library: https://docs.edgeimpulse.com/docs/deploy-your-model-as-a-c-library
11
+ * Running your impulse locally: https://docs.edgeimpulse.com/docs/running-your-impulse-locally-1
12
+ * C++ Inference SDK Library API Reference: https://docs.edgeimpulse.com/reference/inferencing-sdk
13
+
14
+ ## Example Projects
15
+
16
+ For example projects, see 'Running your impulse locally' in the docs (https://docs.edgeimpulse.com/docs/running-your-impulse-locally-1). These pages have examples for virtually any platform under the sun including Linux, Mbed OS, Zephyr, FreeRTOS, and bare-metal on a wide range of platforms. These examples also show how to enable hardware acceleration on Cortex-M, Cortex-A, ARC DSPs and other platforms.
17
+
18
+ ## License
19
+
20
+ Unless specifically indicated otherwise in a file, files are licensed under the Apache 2.0 license, as can be found in edge-impulse-sdk/LICENSE-apache-2.0.txt. Folders containing files under different permissive license than Apache 2.0 are listed below. Each folder contains its own license specified for its files.
21
+
22
+ * edge-impulse-sdk/CMSIS - Apache 2.0
23
+ * edge-impulse-sdk/dsp/kissfft - BSD-3-Clause
24
+ * edge-impulse-sdk/dsp/dct - MIT
25
+ * edge-impulse-sdk/tensorflow - Apache 2.0
26
+ * edge-impulse-sdk/third_party/flatbuffers - Apache 2.0
27
+ * edge-impulse-sdk/third_party/gemmlowp - Apache 2.0
ei-cpp-export/edge-impulse-sdk/.gitignore ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ utensor/CTestTestfile.cmake
2
+ utensor/cmake_install.cmake
3
+ utensor/CMakeFiles/
4
+ utensor/Makefile
5
+ utensor/CMakeCache.txt
6
+ utensor.lib
7
+ utensor/libutensor.a
8
+ *.o
9
+ *.d
ei-cpp-export/edge-impulse-sdk/.mbedignore ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ utensor/CMakeFiles/
2
+ tensorflow/lite/micro/mbed/
3
+ porting/arduino/
4
+ porting/espressif/
5
+ porting/himax/
6
+ porting/posix/
7
+ porting/silabs/
8
+ porting/stm32-cubeai/
9
+ porting/zephyr/
10
+ classifier/ei_run_classifier_c*
11
+ third_party/arc_mli_package/
12
+ tensorflow-lite/
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cachel1_armv7.h ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file cachel1_armv7.h
3
+ * @brief CMSIS Level 1 Cache API for Armv7-M and later
4
+ * @version V1.0.0
5
+ * @date 03. March 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2020 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef ARM_CACHEL1_ARMV7_H
32
+ #define ARM_CACHEL1_ARMV7_H
33
+
34
+ /**
35
+ \ingroup CMSIS_Core_FunctionInterface
36
+ \defgroup CMSIS_Core_CacheFunctions Cache Functions
37
+ \brief Functions that configure Instruction and Data cache.
38
+ @{
39
+ */
40
+
41
+ /* Cache Size ID Register Macros */
42
+ #define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos)
43
+ #define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos )
44
+
45
+ #ifndef __SCB_DCACHE_LINE_SIZE
46
+ #define __SCB_DCACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */
47
+ #endif
48
+
49
+ #ifndef __SCB_ICACHE_LINE_SIZE
50
+ #define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */
51
+ #endif
52
+
53
+ /**
54
+ \brief Enable I-Cache
55
+ \details Turns on I-Cache
56
+ */
57
+ __STATIC_FORCEINLINE void SCB_EnableICache (void)
58
+ {
59
+ #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
60
+ if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */
61
+
62
+ __DSB();
63
+ __ISB();
64
+ SCB->ICIALLU = 0UL; /* invalidate I-Cache */
65
+ __DSB();
66
+ __ISB();
67
+ SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */
68
+ __DSB();
69
+ __ISB();
70
+ #endif
71
+ }
72
+
73
+
74
+ /**
75
+ \brief Disable I-Cache
76
+ \details Turns off I-Cache
77
+ */
78
+ __STATIC_FORCEINLINE void SCB_DisableICache (void)
79
+ {
80
+ #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
81
+ __DSB();
82
+ __ISB();
83
+ SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */
84
+ SCB->ICIALLU = 0UL; /* invalidate I-Cache */
85
+ __DSB();
86
+ __ISB();
87
+ #endif
88
+ }
89
+
90
+
91
+ /**
92
+ \brief Invalidate I-Cache
93
+ \details Invalidates I-Cache
94
+ */
95
+ __STATIC_FORCEINLINE void SCB_InvalidateICache (void)
96
+ {
97
+ #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
98
+ __DSB();
99
+ __ISB();
100
+ SCB->ICIALLU = 0UL;
101
+ __DSB();
102
+ __ISB();
103
+ #endif
104
+ }
105
+
106
+
107
+ /**
108
+ \brief I-Cache Invalidate by address
109
+ \details Invalidates I-Cache for the given address.
110
+ I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity.
111
+ I-Cache memory blocks which are part of given address + given size are invalidated.
112
+ \param[in] addr address
113
+ \param[in] isize size of memory block (in number of bytes)
114
+ */
115
+ __STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize)
116
+ {
117
+ #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
118
+ if ( isize > 0 ) {
119
+ int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U));
120
+ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */;
121
+
122
+ __DSB();
123
+
124
+ do {
125
+ SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
126
+ op_addr += __SCB_ICACHE_LINE_SIZE;
127
+ op_size -= __SCB_ICACHE_LINE_SIZE;
128
+ } while ( op_size > 0 );
129
+
130
+ __DSB();
131
+ __ISB();
132
+ }
133
+ #endif
134
+ }
135
+
136
+
137
+ /**
138
+ \brief Enable D-Cache
139
+ \details Turns on D-Cache
140
+ */
141
+ __STATIC_FORCEINLINE void SCB_EnableDCache (void)
142
+ {
143
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
144
+ uint32_t ccsidr;
145
+ uint32_t sets;
146
+ uint32_t ways;
147
+
148
+ if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */
149
+
150
+ SCB->CSSELR = 0U; /* select Level 1 data cache */
151
+ __DSB();
152
+
153
+ ccsidr = SCB->CCSIDR;
154
+
155
+ /* invalidate D-Cache */
156
+ sets = (uint32_t)(CCSIDR_SETS(ccsidr));
157
+ do {
158
+ ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
159
+ do {
160
+ SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) |
161
+ ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) );
162
+ #if defined ( __CC_ARM )
163
+ __schedule_barrier();
164
+ #endif
165
+ } while (ways-- != 0U);
166
+ } while(sets-- != 0U);
167
+ __DSB();
168
+
169
+ SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */
170
+
171
+ __DSB();
172
+ __ISB();
173
+ #endif
174
+ }
175
+
176
+
177
+ /**
178
+ \brief Disable D-Cache
179
+ \details Turns off D-Cache
180
+ */
181
+ __STATIC_FORCEINLINE void SCB_DisableDCache (void)
182
+ {
183
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
184
+ uint32_t ccsidr;
185
+ uint32_t sets;
186
+ uint32_t ways;
187
+
188
+ SCB->CSSELR = 0U; /* select Level 1 data cache */
189
+ __DSB();
190
+
191
+ SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */
192
+ __DSB();
193
+
194
+ ccsidr = SCB->CCSIDR;
195
+
196
+ /* clean & invalidate D-Cache */
197
+ sets = (uint32_t)(CCSIDR_SETS(ccsidr));
198
+ do {
199
+ ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
200
+ do {
201
+ SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) |
202
+ ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) );
203
+ #if defined ( __CC_ARM )
204
+ __schedule_barrier();
205
+ #endif
206
+ } while (ways-- != 0U);
207
+ } while(sets-- != 0U);
208
+
209
+ __DSB();
210
+ __ISB();
211
+ #endif
212
+ }
213
+
214
+
215
+ /**
216
+ \brief Invalidate D-Cache
217
+ \details Invalidates D-Cache
218
+ */
219
+ __STATIC_FORCEINLINE void SCB_InvalidateDCache (void)
220
+ {
221
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
222
+ uint32_t ccsidr;
223
+ uint32_t sets;
224
+ uint32_t ways;
225
+
226
+ SCB->CSSELR = 0U; /* select Level 1 data cache */
227
+ __DSB();
228
+
229
+ ccsidr = SCB->CCSIDR;
230
+
231
+ /* invalidate D-Cache */
232
+ sets = (uint32_t)(CCSIDR_SETS(ccsidr));
233
+ do {
234
+ ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
235
+ do {
236
+ SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) |
237
+ ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) );
238
+ #if defined ( __CC_ARM )
239
+ __schedule_barrier();
240
+ #endif
241
+ } while (ways-- != 0U);
242
+ } while(sets-- != 0U);
243
+
244
+ __DSB();
245
+ __ISB();
246
+ #endif
247
+ }
248
+
249
+
250
+ /**
251
+ \brief Clean D-Cache
252
+ \details Cleans D-Cache
253
+ */
254
+ __STATIC_FORCEINLINE void SCB_CleanDCache (void)
255
+ {
256
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
257
+ uint32_t ccsidr;
258
+ uint32_t sets;
259
+ uint32_t ways;
260
+
261
+ SCB->CSSELR = 0U; /* select Level 1 data cache */
262
+ __DSB();
263
+
264
+ ccsidr = SCB->CCSIDR;
265
+
266
+ /* clean D-Cache */
267
+ sets = (uint32_t)(CCSIDR_SETS(ccsidr));
268
+ do {
269
+ ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
270
+ do {
271
+ SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) |
272
+ ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) );
273
+ #if defined ( __CC_ARM )
274
+ __schedule_barrier();
275
+ #endif
276
+ } while (ways-- != 0U);
277
+ } while(sets-- != 0U);
278
+
279
+ __DSB();
280
+ __ISB();
281
+ #endif
282
+ }
283
+
284
+
285
+ /**
286
+ \brief Clean & Invalidate D-Cache
287
+ \details Cleans and Invalidates D-Cache
288
+ */
289
+ __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void)
290
+ {
291
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
292
+ uint32_t ccsidr;
293
+ uint32_t sets;
294
+ uint32_t ways;
295
+
296
+ SCB->CSSELR = 0U; /* select Level 1 data cache */
297
+ __DSB();
298
+
299
+ ccsidr = SCB->CCSIDR;
300
+
301
+ /* clean & invalidate D-Cache */
302
+ sets = (uint32_t)(CCSIDR_SETS(ccsidr));
303
+ do {
304
+ ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
305
+ do {
306
+ SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) |
307
+ ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) );
308
+ #if defined ( __CC_ARM )
309
+ __schedule_barrier();
310
+ #endif
311
+ } while (ways-- != 0U);
312
+ } while(sets-- != 0U);
313
+
314
+ __DSB();
315
+ __ISB();
316
+ #endif
317
+ }
318
+
319
+
320
+ /**
321
+ \brief D-Cache Invalidate by address
322
+ \details Invalidates D-Cache for the given address.
323
+ D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity.
324
+ D-Cache memory blocks which are part of given address + given size are invalidated.
325
+ \param[in] addr address
326
+ \param[in] dsize size of memory block (in number of bytes)
327
+ */
328
+ __STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize)
329
+ {
330
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
331
+ if ( dsize > 0 ) {
332
+ int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
333
+ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
334
+
335
+ __DSB();
336
+
337
+ do {
338
+ SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
339
+ op_addr += __SCB_DCACHE_LINE_SIZE;
340
+ op_size -= __SCB_DCACHE_LINE_SIZE;
341
+ } while ( op_size > 0 );
342
+
343
+ __DSB();
344
+ __ISB();
345
+ }
346
+ #endif
347
+ }
348
+
349
+
350
+ /**
351
+ \brief D-Cache Clean by address
352
+ \details Cleans D-Cache for the given address
353
+ D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity.
354
+ D-Cache memory blocks which are part of given address + given size are cleaned.
355
+ \param[in] addr address
356
+ \param[in] dsize size of memory block (in number of bytes)
357
+ */
358
+ __STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize)
359
+ {
360
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
361
+ if ( dsize > 0 ) {
362
+ int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
363
+ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
364
+
365
+ __DSB();
366
+
367
+ do {
368
+ SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
369
+ op_addr += __SCB_DCACHE_LINE_SIZE;
370
+ op_size -= __SCB_DCACHE_LINE_SIZE;
371
+ } while ( op_size > 0 );
372
+
373
+ __DSB();
374
+ __ISB();
375
+ }
376
+ #endif
377
+ }
378
+
379
+
380
+ /**
381
+ \brief D-Cache Clean and Invalidate by address
382
+ \details Cleans and invalidates D_Cache for the given address
383
+ D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity.
384
+ D-Cache memory blocks which are part of given address + given size are cleaned and invalidated.
385
+ \param[in] addr address (aligned to 32-byte boundary)
386
+ \param[in] dsize size of memory block (in number of bytes)
387
+ */
388
+ __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize)
389
+ {
390
+ #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
391
+ if ( dsize > 0 ) {
392
+ int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
393
+ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
394
+
395
+ __DSB();
396
+
397
+ do {
398
+ SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
399
+ op_addr += __SCB_DCACHE_LINE_SIZE;
400
+ op_size -= __SCB_DCACHE_LINE_SIZE;
401
+ } while ( op_size > 0 );
402
+
403
+ __DSB();
404
+ __ISB();
405
+ }
406
+ #endif
407
+ }
408
+
409
+ /*@} end of CMSIS_Core_CacheFunctions */
410
+
411
+ #endif /* ARM_CACHEL1_ARMV7_H */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armcc.h ADDED
@@ -0,0 +1,887 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_armcc.h
3
+ * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file
4
+ * @version V5.3.0
5
+ * @date 19. February 2021
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2021 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef __CMSIS_ARMCC_H
26
+ #define __CMSIS_ARMCC_H
27
+
28
+
29
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677)
30
+ #error "Please use Arm Compiler Toolchain V4.0.677 or later!"
31
+ #endif
32
+
33
+ /* CMSIS compiler control architecture macros */
34
+ #if ((defined (__TARGET_ARCH_6_M ) && (__TARGET_ARCH_6_M == 1)) || \
35
+ (defined (__TARGET_ARCH_6S_M ) && (__TARGET_ARCH_6S_M == 1)) )
36
+ #define __ARM_ARCH_6M__ 1
37
+ #endif
38
+
39
+ #if (defined (__TARGET_ARCH_7_M ) && (__TARGET_ARCH_7_M == 1))
40
+ #define __ARM_ARCH_7M__ 1
41
+ #endif
42
+
43
+ #if (defined (__TARGET_ARCH_7E_M) && (__TARGET_ARCH_7E_M == 1))
44
+ #define __ARM_ARCH_7EM__ 1
45
+ #endif
46
+
47
+ /* __ARM_ARCH_8M_BASE__ not applicable */
48
+ /* __ARM_ARCH_8M_MAIN__ not applicable */
49
+ /* __ARM_ARCH_8_1M_MAIN__ not applicable */
50
+
51
+ /* CMSIS compiler control DSP macros */
52
+ #if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
53
+ #define __ARM_FEATURE_DSP 1
54
+ #endif
55
+
56
+ /* CMSIS compiler specific defines */
57
+ #ifndef __ASM
58
+ #define __ASM __asm
59
+ #endif
60
+ #ifndef __INLINE
61
+ #define __INLINE __inline
62
+ #endif
63
+ #ifndef __STATIC_INLINE
64
+ #define __STATIC_INLINE static __inline
65
+ #endif
66
+ #ifndef __STATIC_FORCEINLINE
67
+ #define __STATIC_FORCEINLINE static __forceinline
68
+ #endif
69
+ #ifndef __NO_RETURN
70
+ #define __NO_RETURN __declspec(noreturn)
71
+ #endif
72
+ #ifndef __USED
73
+ #define __USED __attribute__((used))
74
+ #endif
75
+ #ifndef __WEAK
76
+ #define __WEAK __attribute__((weak))
77
+ #endif
78
+ #ifndef __PACKED
79
+ #define __PACKED __attribute__((packed))
80
+ #endif
81
+ #ifndef __PACKED_STRUCT
82
+ #define __PACKED_STRUCT __packed struct
83
+ #endif
84
+ #ifndef __PACKED_UNION
85
+ #define __PACKED_UNION __packed union
86
+ #endif
87
+ #ifndef __UNALIGNED_UINT32 /* deprecated */
88
+ #define __UNALIGNED_UINT32(x) (*((__packed uint32_t *)(x)))
89
+ #endif
90
+ #ifndef __UNALIGNED_UINT16_WRITE
91
+ #define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val))
92
+ #endif
93
+ #ifndef __UNALIGNED_UINT16_READ
94
+ #define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr)))
95
+ #endif
96
+ #ifndef __UNALIGNED_UINT32_WRITE
97
+ #define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val))
98
+ #endif
99
+ #ifndef __UNALIGNED_UINT32_READ
100
+ #define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr)))
101
+ #endif
102
+ #ifndef __ALIGNED
103
+ #define __ALIGNED(x) __attribute__((aligned(x)))
104
+ #endif
105
+ #ifndef __RESTRICT
106
+ #define __RESTRICT __restrict
107
+ #endif
108
+ #ifndef __COMPILER_BARRIER
109
+ #define __COMPILER_BARRIER() __memory_changed()
110
+ #endif
111
+
112
+ /* ######################### Startup and Lowlevel Init ######################## */
113
+
114
+ #ifndef __PROGRAM_START
115
+ #define __PROGRAM_START __main
116
+ #endif
117
+
118
+ #ifndef __INITIAL_SP
119
+ #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit
120
+ #endif
121
+
122
+ #ifndef __STACK_LIMIT
123
+ #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base
124
+ #endif
125
+
126
+ #ifndef __VECTOR_TABLE
127
+ #define __VECTOR_TABLE __Vectors
128
+ #endif
129
+
130
+ #ifndef __VECTOR_TABLE_ATTRIBUTE
131
+ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET")))
132
+ #endif
133
+
134
+ /* ########################### Core Function Access ########################### */
135
+ /** \ingroup CMSIS_Core_FunctionInterface
136
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
137
+ @{
138
+ */
139
+
140
+ /**
141
+ \brief Enable IRQ Interrupts
142
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
143
+ Can only be executed in Privileged modes.
144
+ */
145
+ /* intrinsic void __enable_irq(); */
146
+
147
+
148
+ /**
149
+ \brief Disable IRQ Interrupts
150
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
151
+ Can only be executed in Privileged modes.
152
+ */
153
+ /* intrinsic void __disable_irq(); */
154
+
155
+ /**
156
+ \brief Get Control Register
157
+ \details Returns the content of the Control Register.
158
+ \return Control Register value
159
+ */
160
+ __STATIC_INLINE uint32_t __get_CONTROL(void)
161
+ {
162
+ register uint32_t __regControl __ASM("control");
163
+ return(__regControl);
164
+ }
165
+
166
+
167
+ /**
168
+ \brief Set Control Register
169
+ \details Writes the given value to the Control Register.
170
+ \param [in] control Control Register value to set
171
+ */
172
+ __STATIC_INLINE void __set_CONTROL(uint32_t control)
173
+ {
174
+ register uint32_t __regControl __ASM("control");
175
+ __regControl = control;
176
+ }
177
+
178
+
179
+ /**
180
+ \brief Get IPSR Register
181
+ \details Returns the content of the IPSR Register.
182
+ \return IPSR Register value
183
+ */
184
+ __STATIC_INLINE uint32_t __get_IPSR(void)
185
+ {
186
+ register uint32_t __regIPSR __ASM("ipsr");
187
+ return(__regIPSR);
188
+ }
189
+
190
+
191
+ /**
192
+ \brief Get APSR Register
193
+ \details Returns the content of the APSR Register.
194
+ \return APSR Register value
195
+ */
196
+ __STATIC_INLINE uint32_t __get_APSR(void)
197
+ {
198
+ register uint32_t __regAPSR __ASM("apsr");
199
+ return(__regAPSR);
200
+ }
201
+
202
+
203
+ /**
204
+ \brief Get xPSR Register
205
+ \details Returns the content of the xPSR Register.
206
+ \return xPSR Register value
207
+ */
208
+ __STATIC_INLINE uint32_t __get_xPSR(void)
209
+ {
210
+ register uint32_t __regXPSR __ASM("xpsr");
211
+ return(__regXPSR);
212
+ }
213
+
214
+
215
+ /**
216
+ \brief Get Process Stack Pointer
217
+ \details Returns the current value of the Process Stack Pointer (PSP).
218
+ \return PSP Register value
219
+ */
220
+ __STATIC_INLINE uint32_t __get_PSP(void)
221
+ {
222
+ register uint32_t __regProcessStackPointer __ASM("psp");
223
+ return(__regProcessStackPointer);
224
+ }
225
+
226
+
227
+ /**
228
+ \brief Set Process Stack Pointer
229
+ \details Assigns the given value to the Process Stack Pointer (PSP).
230
+ \param [in] topOfProcStack Process Stack Pointer value to set
231
+ */
232
+ __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
233
+ {
234
+ register uint32_t __regProcessStackPointer __ASM("psp");
235
+ __regProcessStackPointer = topOfProcStack;
236
+ }
237
+
238
+
239
+ /**
240
+ \brief Get Main Stack Pointer
241
+ \details Returns the current value of the Main Stack Pointer (MSP).
242
+ \return MSP Register value
243
+ */
244
+ __STATIC_INLINE uint32_t __get_MSP(void)
245
+ {
246
+ register uint32_t __regMainStackPointer __ASM("msp");
247
+ return(__regMainStackPointer);
248
+ }
249
+
250
+
251
+ /**
252
+ \brief Set Main Stack Pointer
253
+ \details Assigns the given value to the Main Stack Pointer (MSP).
254
+ \param [in] topOfMainStack Main Stack Pointer value to set
255
+ */
256
+ __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
257
+ {
258
+ register uint32_t __regMainStackPointer __ASM("msp");
259
+ __regMainStackPointer = topOfMainStack;
260
+ }
261
+
262
+
263
+ /**
264
+ \brief Get Priority Mask
265
+ \details Returns the current state of the priority mask bit from the Priority Mask Register.
266
+ \return Priority Mask value
267
+ */
268
+ __STATIC_INLINE uint32_t __get_PRIMASK(void)
269
+ {
270
+ register uint32_t __regPriMask __ASM("primask");
271
+ return(__regPriMask);
272
+ }
273
+
274
+
275
+ /**
276
+ \brief Set Priority Mask
277
+ \details Assigns the given value to the Priority Mask Register.
278
+ \param [in] priMask Priority Mask
279
+ */
280
+ __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
281
+ {
282
+ register uint32_t __regPriMask __ASM("primask");
283
+ __regPriMask = (priMask);
284
+ }
285
+
286
+
287
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
288
+ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
289
+
290
+ /**
291
+ \brief Enable FIQ
292
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
293
+ Can only be executed in Privileged modes.
294
+ */
295
+ #define __enable_fault_irq __enable_fiq
296
+
297
+
298
+ /**
299
+ \brief Disable FIQ
300
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
301
+ Can only be executed in Privileged modes.
302
+ */
303
+ #define __disable_fault_irq __disable_fiq
304
+
305
+
306
+ /**
307
+ \brief Get Base Priority
308
+ \details Returns the current value of the Base Priority register.
309
+ \return Base Priority register value
310
+ */
311
+ __STATIC_INLINE uint32_t __get_BASEPRI(void)
312
+ {
313
+ register uint32_t __regBasePri __ASM("basepri");
314
+ return(__regBasePri);
315
+ }
316
+
317
+
318
+ /**
319
+ \brief Set Base Priority
320
+ \details Assigns the given value to the Base Priority register.
321
+ \param [in] basePri Base Priority value to set
322
+ */
323
+ __STATIC_INLINE void __set_BASEPRI(uint32_t basePri)
324
+ {
325
+ register uint32_t __regBasePri __ASM("basepri");
326
+ __regBasePri = (basePri & 0xFFU);
327
+ }
328
+
329
+
330
+ /**
331
+ \brief Set Base Priority with condition
332
+ \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
333
+ or the new value increases the BASEPRI priority level.
334
+ \param [in] basePri Base Priority value to set
335
+ */
336
+ __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri)
337
+ {
338
+ register uint32_t __regBasePriMax __ASM("basepri_max");
339
+ __regBasePriMax = (basePri & 0xFFU);
340
+ }
341
+
342
+
343
+ /**
344
+ \brief Get Fault Mask
345
+ \details Returns the current value of the Fault Mask register.
346
+ \return Fault Mask register value
347
+ */
348
+ __STATIC_INLINE uint32_t __get_FAULTMASK(void)
349
+ {
350
+ register uint32_t __regFaultMask __ASM("faultmask");
351
+ return(__regFaultMask);
352
+ }
353
+
354
+
355
+ /**
356
+ \brief Set Fault Mask
357
+ \details Assigns the given value to the Fault Mask register.
358
+ \param [in] faultMask Fault Mask value to set
359
+ */
360
+ __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
361
+ {
362
+ register uint32_t __regFaultMask __ASM("faultmask");
363
+ __regFaultMask = (faultMask & (uint32_t)1U);
364
+ }
365
+
366
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
367
+ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
368
+
369
+
370
+ /**
371
+ \brief Get FPSCR
372
+ \details Returns the current value of the Floating Point Status/Control register.
373
+ \return Floating Point Status/Control register value
374
+ */
375
+ __STATIC_INLINE uint32_t __get_FPSCR(void)
376
+ {
377
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
378
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
379
+ register uint32_t __regfpscr __ASM("fpscr");
380
+ return(__regfpscr);
381
+ #else
382
+ return(0U);
383
+ #endif
384
+ }
385
+
386
+
387
+ /**
388
+ \brief Set FPSCR
389
+ \details Assigns the given value to the Floating Point Status/Control register.
390
+ \param [in] fpscr Floating Point Status/Control value to set
391
+ */
392
+ __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
393
+ {
394
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
395
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
396
+ register uint32_t __regfpscr __ASM("fpscr");
397
+ __regfpscr = (fpscr);
398
+ #else
399
+ (void)fpscr;
400
+ #endif
401
+ }
402
+
403
+
404
+ /*@} end of CMSIS_Core_RegAccFunctions */
405
+
406
+
407
+ /* ########################## Core Instruction Access ######################### */
408
+ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
409
+ Access to dedicated instructions
410
+ @{
411
+ */
412
+
413
+ /**
414
+ \brief No Operation
415
+ \details No Operation does nothing. This instruction can be used for code alignment purposes.
416
+ */
417
+ #define __NOP __nop
418
+
419
+
420
+ /**
421
+ \brief Wait For Interrupt
422
+ \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
423
+ */
424
+ #define __WFI __wfi
425
+
426
+
427
+ /**
428
+ \brief Wait For Event
429
+ \details Wait For Event is a hint instruction that permits the processor to enter
430
+ a low-power state until one of a number of events occurs.
431
+ */
432
+ #define __WFE __wfe
433
+
434
+
435
+ /**
436
+ \brief Send Event
437
+ \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
438
+ */
439
+ #define __SEV __sev
440
+
441
+
442
+ /**
443
+ \brief Instruction Synchronization Barrier
444
+ \details Instruction Synchronization Barrier flushes the pipeline in the processor,
445
+ so that all instructions following the ISB are fetched from cache or memory,
446
+ after the instruction has been completed.
447
+ */
448
+ #define __ISB() __isb(0xF)
449
+
450
+ /**
451
+ \brief Data Synchronization Barrier
452
+ \details Acts as a special kind of Data Memory Barrier.
453
+ It completes when all explicit memory accesses before this instruction complete.
454
+ */
455
+ #define __DSB() __dsb(0xF)
456
+
457
+ /**
458
+ \brief Data Memory Barrier
459
+ \details Ensures the apparent order of the explicit memory operations before
460
+ and after the instruction, without ensuring their completion.
461
+ */
462
+ #define __DMB() __dmb(0xF)
463
+
464
+
465
+ /**
466
+ \brief Reverse byte order (32 bit)
467
+ \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
468
+ \param [in] value Value to reverse
469
+ \return Reversed value
470
+ */
471
+ #define __REV __rev
472
+
473
+
474
+ /**
475
+ \brief Reverse byte order (16 bit)
476
+ \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
477
+ \param [in] value Value to reverse
478
+ \return Reversed value
479
+ */
480
+ #ifndef __NO_EMBEDDED_ASM
481
+ __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
482
+ {
483
+ rev16 r0, r0
484
+ bx lr
485
+ }
486
+ #endif
487
+
488
+
489
+ /**
490
+ \brief Reverse byte order (16 bit)
491
+ \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
492
+ \param [in] value Value to reverse
493
+ \return Reversed value
494
+ */
495
+ #ifndef __NO_EMBEDDED_ASM
496
+ __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value)
497
+ {
498
+ revsh r0, r0
499
+ bx lr
500
+ }
501
+ #endif
502
+
503
+
504
+ /**
505
+ \brief Rotate Right in unsigned value (32 bit)
506
+ \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
507
+ \param [in] op1 Value to rotate
508
+ \param [in] op2 Number of Bits to rotate
509
+ \return Rotated value
510
+ */
511
+ #define __ROR __ror
512
+
513
+
514
+ /**
515
+ \brief Breakpoint
516
+ \details Causes the processor to enter Debug state.
517
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
518
+ \param [in] value is ignored by the processor.
519
+ If required, a debugger can use it to store additional information about the breakpoint.
520
+ */
521
+ #define __BKPT(value) __breakpoint(value)
522
+
523
+
524
+ /**
525
+ \brief Reverse bit order of value
526
+ \details Reverses the bit order of the given value.
527
+ \param [in] value Value to reverse
528
+ \return Reversed value
529
+ */
530
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
531
+ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
532
+ #define __RBIT __rbit
533
+ #else
534
+ __attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
535
+ {
536
+ uint32_t result;
537
+ uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
538
+
539
+ result = value; /* r will be reversed bits of v; first get LSB of v */
540
+ for (value >>= 1U; value != 0U; value >>= 1U)
541
+ {
542
+ result <<= 1U;
543
+ result |= value & 1U;
544
+ s--;
545
+ }
546
+ result <<= s; /* shift when v's highest bits are zero */
547
+ return result;
548
+ }
549
+ #endif
550
+
551
+
552
+ /**
553
+ \brief Count leading zeros
554
+ \details Counts the number of leading zeros of a data value.
555
+ \param [in] value Value to count the leading zeros
556
+ \return number of leading zeros in value
557
+ */
558
+ #define __CLZ __clz
559
+
560
+
561
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
562
+ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
563
+
564
+ /**
565
+ \brief LDR Exclusive (8 bit)
566
+ \details Executes a exclusive LDR instruction for 8 bit value.
567
+ \param [in] ptr Pointer to data
568
+ \return value of type uint8_t at (*ptr)
569
+ */
570
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
571
+ #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr))
572
+ #else
573
+ #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop")
574
+ #endif
575
+
576
+
577
+ /**
578
+ \brief LDR Exclusive (16 bit)
579
+ \details Executes a exclusive LDR instruction for 16 bit values.
580
+ \param [in] ptr Pointer to data
581
+ \return value of type uint16_t at (*ptr)
582
+ */
583
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
584
+ #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr))
585
+ #else
586
+ #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop")
587
+ #endif
588
+
589
+
590
+ /**
591
+ \brief LDR Exclusive (32 bit)
592
+ \details Executes a exclusive LDR instruction for 32 bit values.
593
+ \param [in] ptr Pointer to data
594
+ \return value of type uint32_t at (*ptr)
595
+ */
596
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
597
+ #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr))
598
+ #else
599
+ #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop")
600
+ #endif
601
+
602
+
603
+ /**
604
+ \brief STR Exclusive (8 bit)
605
+ \details Executes a exclusive STR instruction for 8 bit values.
606
+ \param [in] value Value to store
607
+ \param [in] ptr Pointer to location
608
+ \return 0 Function succeeded
609
+ \return 1 Function failed
610
+ */
611
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
612
+ #define __STREXB(value, ptr) __strex(value, ptr)
613
+ #else
614
+ #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
615
+ #endif
616
+
617
+
618
+ /**
619
+ \brief STR Exclusive (16 bit)
620
+ \details Executes a exclusive STR instruction for 16 bit values.
621
+ \param [in] value Value to store
622
+ \param [in] ptr Pointer to location
623
+ \return 0 Function succeeded
624
+ \return 1 Function failed
625
+ */
626
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
627
+ #define __STREXH(value, ptr) __strex(value, ptr)
628
+ #else
629
+ #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
630
+ #endif
631
+
632
+
633
+ /**
634
+ \brief STR Exclusive (32 bit)
635
+ \details Executes a exclusive STR instruction for 32 bit values.
636
+ \param [in] value Value to store
637
+ \param [in] ptr Pointer to location
638
+ \return 0 Function succeeded
639
+ \return 1 Function failed
640
+ */
641
+ #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
642
+ #define __STREXW(value, ptr) __strex(value, ptr)
643
+ #else
644
+ #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
645
+ #endif
646
+
647
+
648
+ /**
649
+ \brief Remove the exclusive lock
650
+ \details Removes the exclusive lock which is created by LDREX.
651
+ */
652
+ #define __CLREX __clrex
653
+
654
+
655
+ /**
656
+ \brief Signed Saturate
657
+ \details Saturates a signed value.
658
+ \param [in] value Value to be saturated
659
+ \param [in] sat Bit position to saturate to (1..32)
660
+ \return Saturated value
661
+ */
662
+ #define __SSAT __ssat
663
+
664
+
665
+ /**
666
+ \brief Unsigned Saturate
667
+ \details Saturates an unsigned value.
668
+ \param [in] value Value to be saturated
669
+ \param [in] sat Bit position to saturate to (0..31)
670
+ \return Saturated value
671
+ */
672
+ #define __USAT __usat
673
+
674
+
675
+ /**
676
+ \brief Rotate Right with Extend (32 bit)
677
+ \details Moves each bit of a bitstring right by one bit.
678
+ The carry input is shifted in at the left end of the bitstring.
679
+ \param [in] value Value to rotate
680
+ \return Rotated value
681
+ */
682
+ #ifndef __NO_EMBEDDED_ASM
683
+ __attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value)
684
+ {
685
+ rrx r0, r0
686
+ bx lr
687
+ }
688
+ #endif
689
+
690
+
691
+ /**
692
+ \brief LDRT Unprivileged (8 bit)
693
+ \details Executes a Unprivileged LDRT instruction for 8 bit value.
694
+ \param [in] ptr Pointer to data
695
+ \return value of type uint8_t at (*ptr)
696
+ */
697
+ #define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr))
698
+
699
+
700
+ /**
701
+ \brief LDRT Unprivileged (16 bit)
702
+ \details Executes a Unprivileged LDRT instruction for 16 bit values.
703
+ \param [in] ptr Pointer to data
704
+ \return value of type uint16_t at (*ptr)
705
+ */
706
+ #define __LDRHT(ptr) ((uint16_t) __ldrt(ptr))
707
+
708
+
709
+ /**
710
+ \brief LDRT Unprivileged (32 bit)
711
+ \details Executes a Unprivileged LDRT instruction for 32 bit values.
712
+ \param [in] ptr Pointer to data
713
+ \return value of type uint32_t at (*ptr)
714
+ */
715
+ #define __LDRT(ptr) ((uint32_t ) __ldrt(ptr))
716
+
717
+
718
+ /**
719
+ \brief STRT Unprivileged (8 bit)
720
+ \details Executes a Unprivileged STRT instruction for 8 bit values.
721
+ \param [in] value Value to store
722
+ \param [in] ptr Pointer to location
723
+ */
724
+ #define __STRBT(value, ptr) __strt(value, ptr)
725
+
726
+
727
+ /**
728
+ \brief STRT Unprivileged (16 bit)
729
+ \details Executes a Unprivileged STRT instruction for 16 bit values.
730
+ \param [in] value Value to store
731
+ \param [in] ptr Pointer to location
732
+ */
733
+ #define __STRHT(value, ptr) __strt(value, ptr)
734
+
735
+
736
+ /**
737
+ \brief STRT Unprivileged (32 bit)
738
+ \details Executes a Unprivileged STRT instruction for 32 bit values.
739
+ \param [in] value Value to store
740
+ \param [in] ptr Pointer to location
741
+ */
742
+ #define __STRT(value, ptr) __strt(value, ptr)
743
+
744
+ #else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
745
+ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
746
+
747
+ /**
748
+ \brief Signed Saturate
749
+ \details Saturates a signed value.
750
+ \param [in] value Value to be saturated
751
+ \param [in] sat Bit position to saturate to (1..32)
752
+ \return Saturated value
753
+ */
754
+ __attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat)
755
+ {
756
+ if ((sat >= 1U) && (sat <= 32U))
757
+ {
758
+ const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
759
+ const int32_t min = -1 - max ;
760
+ if (val > max)
761
+ {
762
+ return max;
763
+ }
764
+ else if (val < min)
765
+ {
766
+ return min;
767
+ }
768
+ }
769
+ return val;
770
+ }
771
+
772
+ /**
773
+ \brief Unsigned Saturate
774
+ \details Saturates an unsigned value.
775
+ \param [in] value Value to be saturated
776
+ \param [in] sat Bit position to saturate to (0..31)
777
+ \return Saturated value
778
+ */
779
+ __attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat)
780
+ {
781
+ if (sat <= 31U)
782
+ {
783
+ const uint32_t max = ((1U << sat) - 1U);
784
+ if (val > (int32_t)max)
785
+ {
786
+ return max;
787
+ }
788
+ else if (val < 0)
789
+ {
790
+ return 0U;
791
+ }
792
+ }
793
+ return (uint32_t)val;
794
+ }
795
+
796
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
797
+ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
798
+
799
+ /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
800
+
801
+
802
+ /* ################### Compiler specific Intrinsics ########################### */
803
+ /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
804
+ Access to dedicated SIMD instructions
805
+ @{
806
+ */
807
+
808
+ #if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
809
+
810
+ #define __SADD8 __sadd8
811
+ #define __QADD8 __qadd8
812
+ #define __SHADD8 __shadd8
813
+ #define __UADD8 __uadd8
814
+ #define __UQADD8 __uqadd8
815
+ #define __UHADD8 __uhadd8
816
+ #define __SSUB8 __ssub8
817
+ #define __QSUB8 __qsub8
818
+ #define __SHSUB8 __shsub8
819
+ #define __USUB8 __usub8
820
+ #define __UQSUB8 __uqsub8
821
+ #define __UHSUB8 __uhsub8
822
+ #define __SADD16 __sadd16
823
+ #define __QADD16 __qadd16
824
+ #define __SHADD16 __shadd16
825
+ #define __UADD16 __uadd16
826
+ #define __UQADD16 __uqadd16
827
+ #define __UHADD16 __uhadd16
828
+ #define __SSUB16 __ssub16
829
+ #define __QSUB16 __qsub16
830
+ #define __SHSUB16 __shsub16
831
+ #define __USUB16 __usub16
832
+ #define __UQSUB16 __uqsub16
833
+ #define __UHSUB16 __uhsub16
834
+ #define __SASX __sasx
835
+ #define __QASX __qasx
836
+ #define __SHASX __shasx
837
+ #define __UASX __uasx
838
+ #define __UQASX __uqasx
839
+ #define __UHASX __uhasx
840
+ #define __SSAX __ssax
841
+ #define __QSAX __qsax
842
+ #define __SHSAX __shsax
843
+ #define __USAX __usax
844
+ #define __UQSAX __uqsax
845
+ #define __UHSAX __uhsax
846
+ #define __USAD8 __usad8
847
+ #define __USADA8 __usada8
848
+ #define __SSAT16 __ssat16
849
+ #define __USAT16 __usat16
850
+ #define __UXTB16 __uxtb16
851
+ #define __UXTAB16 __uxtab16
852
+ #define __SXTB16 __sxtb16
853
+ #define __SXTAB16 __sxtab16
854
+ #define __SMUAD __smuad
855
+ #define __SMUADX __smuadx
856
+ #define __SMLAD __smlad
857
+ #define __SMLADX __smladx
858
+ #define __SMLALD __smlald
859
+ #define __SMLALDX __smlaldx
860
+ #define __SMUSD __smusd
861
+ #define __SMUSDX __smusdx
862
+ #define __SMLSD __smlsd
863
+ #define __SMLSDX __smlsdx
864
+ #define __SMLSLD __smlsld
865
+ #define __SMLSLDX __smlsldx
866
+ #define __SEL __sel
867
+ #define __QADD __qadd
868
+ #define __QSUB __qsub
869
+
870
+ #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
871
+ ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
872
+
873
+ #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
874
+ ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
875
+
876
+ #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
877
+ ((int64_t)(ARG3) << 32U) ) >> 32U))
878
+
879
+ #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
880
+
881
+ #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
882
+
883
+ #endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
884
+ /*@} end of group CMSIS_SIMD_intrinsics */
885
+
886
+
887
+ #endif /* __CMSIS_ARMCC_H */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang.h ADDED
@@ -0,0 +1,1489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_armclang.h
3
+ * @brief CMSIS compiler armclang (Arm Compiler 6) header file
4
+ * @version V5.4.0
5
+ * @date 19. February 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2021 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ /*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */
26
+
27
+ #ifndef __CMSIS_ARMCLANG_H
28
+ #define __CMSIS_ARMCLANG_H
29
+
30
+ #pragma clang system_header /* treat file as system include file */
31
+
32
+ #ifndef __ARM_COMPAT_H
33
+ #include <arm_compat.h> /* Compatibility header for Arm Compiler 5 intrinsics */
34
+ #endif
35
+
36
+ /* CMSIS compiler specific defines */
37
+ #ifndef __ASM
38
+ #define __ASM __asm
39
+ #endif
40
+ #ifndef __INLINE
41
+ #define __INLINE __inline
42
+ #endif
43
+ #ifndef __STATIC_INLINE
44
+ #define __STATIC_INLINE static __inline
45
+ #endif
46
+ #ifndef __STATIC_FORCEINLINE
47
+ #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline
48
+ #endif
49
+ #ifndef __NO_RETURN
50
+ #define __NO_RETURN __attribute__((__noreturn__))
51
+ #endif
52
+ #ifndef __USED
53
+ #define __USED __attribute__((used))
54
+ #endif
55
+ #ifndef __WEAK
56
+ #define __WEAK __attribute__((weak))
57
+ #endif
58
+ #ifndef __PACKED
59
+ #define __PACKED __attribute__((packed, aligned(1)))
60
+ #endif
61
+ #ifndef __PACKED_STRUCT
62
+ #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
63
+ #endif
64
+ #ifndef __PACKED_UNION
65
+ #define __PACKED_UNION union __attribute__((packed, aligned(1)))
66
+ #endif
67
+ #ifndef __UNALIGNED_UINT32 /* deprecated */
68
+ #pragma clang diagnostic push
69
+ #pragma clang diagnostic ignored "-Wpacked"
70
+ /*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */
71
+ struct __attribute__((packed)) T_UINT32 { uint32_t v; };
72
+ #pragma clang diagnostic pop
73
+ #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
74
+ #endif
75
+ #ifndef __UNALIGNED_UINT16_WRITE
76
+ #pragma clang diagnostic push
77
+ #pragma clang diagnostic ignored "-Wpacked"
78
+ /*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
79
+ __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
80
+ #pragma clang diagnostic pop
81
+ #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
82
+ #endif
83
+ #ifndef __UNALIGNED_UINT16_READ
84
+ #pragma clang diagnostic push
85
+ #pragma clang diagnostic ignored "-Wpacked"
86
+ /*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
87
+ __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
88
+ #pragma clang diagnostic pop
89
+ #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
90
+ #endif
91
+ #ifndef __UNALIGNED_UINT32_WRITE
92
+ #pragma clang diagnostic push
93
+ #pragma clang diagnostic ignored "-Wpacked"
94
+ /*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
95
+ __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
96
+ #pragma clang diagnostic pop
97
+ #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
98
+ #endif
99
+ #ifndef __UNALIGNED_UINT32_READ
100
+ #pragma clang diagnostic push
101
+ #pragma clang diagnostic ignored "-Wpacked"
102
+ /*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */
103
+ __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
104
+ #pragma clang diagnostic pop
105
+ #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
106
+ #endif
107
+ #ifndef __ALIGNED
108
+ #define __ALIGNED(x) __attribute__((aligned(x)))
109
+ #endif
110
+ #ifndef __RESTRICT
111
+ #define __RESTRICT __restrict
112
+ #endif
113
+ #ifndef __COMPILER_BARRIER
114
+ #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
115
+ #endif
116
+
117
+ /* ######################### Startup and Lowlevel Init ######################## */
118
+
119
+ #ifndef __PROGRAM_START
120
+ #define __PROGRAM_START __main
121
+ #endif
122
+
123
+ #ifndef __INITIAL_SP
124
+ #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit
125
+ #endif
126
+
127
+ #ifndef __STACK_LIMIT
128
+ #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base
129
+ #endif
130
+
131
+ #ifndef __VECTOR_TABLE
132
+ #define __VECTOR_TABLE __Vectors
133
+ #endif
134
+
135
+ #ifndef __VECTOR_TABLE_ATTRIBUTE
136
+ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET")))
137
+ #endif
138
+
139
+ #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
140
+ #ifndef __STACK_SEAL
141
+ #define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base
142
+ #endif
143
+
144
+ #ifndef __TZ_STACK_SEAL_SIZE
145
+ #define __TZ_STACK_SEAL_SIZE 8U
146
+ #endif
147
+
148
+ #ifndef __TZ_STACK_SEAL_VALUE
149
+ #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL
150
+ #endif
151
+
152
+
153
+ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) {
154
+ *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE;
155
+ }
156
+ #endif
157
+
158
+
159
+ /* ########################### Core Function Access ########################### */
160
+ /** \ingroup CMSIS_Core_FunctionInterface
161
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
162
+ @{
163
+ */
164
+
165
+ /**
166
+ \brief Enable IRQ Interrupts
167
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
168
+ Can only be executed in Privileged modes.
169
+ */
170
+ /* intrinsic void __enable_irq(); see arm_compat.h */
171
+
172
+
173
+ /**
174
+ \brief Disable IRQ Interrupts
175
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
176
+ Can only be executed in Privileged modes.
177
+ */
178
+ /* intrinsic void __disable_irq(); see arm_compat.h */
179
+
180
+
181
+ /**
182
+ \brief Get Control Register
183
+ \details Returns the content of the Control Register.
184
+ \return Control Register value
185
+ */
186
+ __STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
187
+ {
188
+ uint32_t result;
189
+
190
+ __ASM volatile ("MRS %0, control" : "=r" (result) );
191
+ return(result);
192
+ }
193
+
194
+
195
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
196
+ /**
197
+ \brief Get Control Register (non-secure)
198
+ \details Returns the content of the non-secure Control Register when in secure mode.
199
+ \return non-secure Control Register value
200
+ */
201
+ __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
202
+ {
203
+ uint32_t result;
204
+
205
+ __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
206
+ return(result);
207
+ }
208
+ #endif
209
+
210
+
211
+ /**
212
+ \brief Set Control Register
213
+ \details Writes the given value to the Control Register.
214
+ \param [in] control Control Register value to set
215
+ */
216
+ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
217
+ {
218
+ __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
219
+ }
220
+
221
+
222
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
223
+ /**
224
+ \brief Set Control Register (non-secure)
225
+ \details Writes the given value to the non-secure Control Register when in secure state.
226
+ \param [in] control Control Register value to set
227
+ */
228
+ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
229
+ {
230
+ __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
231
+ }
232
+ #endif
233
+
234
+
235
+ /**
236
+ \brief Get IPSR Register
237
+ \details Returns the content of the IPSR Register.
238
+ \return IPSR Register value
239
+ */
240
+ __STATIC_FORCEINLINE uint32_t __get_IPSR(void)
241
+ {
242
+ uint32_t result;
243
+
244
+ __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
245
+ return(result);
246
+ }
247
+
248
+
249
+ /**
250
+ \brief Get APSR Register
251
+ \details Returns the content of the APSR Register.
252
+ \return APSR Register value
253
+ */
254
+ __STATIC_FORCEINLINE uint32_t __get_APSR(void)
255
+ {
256
+ uint32_t result;
257
+
258
+ __ASM volatile ("MRS %0, apsr" : "=r" (result) );
259
+ return(result);
260
+ }
261
+
262
+
263
+ /**
264
+ \brief Get xPSR Register
265
+ \details Returns the content of the xPSR Register.
266
+ \return xPSR Register value
267
+ */
268
+ __STATIC_FORCEINLINE uint32_t __get_xPSR(void)
269
+ {
270
+ uint32_t result;
271
+
272
+ __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
273
+ return(result);
274
+ }
275
+
276
+
277
+ /**
278
+ \brief Get Process Stack Pointer
279
+ \details Returns the current value of the Process Stack Pointer (PSP).
280
+ \return PSP Register value
281
+ */
282
+ __STATIC_FORCEINLINE uint32_t __get_PSP(void)
283
+ {
284
+ uint32_t result;
285
+
286
+ __ASM volatile ("MRS %0, psp" : "=r" (result) );
287
+ return(result);
288
+ }
289
+
290
+
291
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
292
+ /**
293
+ \brief Get Process Stack Pointer (non-secure)
294
+ \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
295
+ \return PSP Register value
296
+ */
297
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
298
+ {
299
+ uint32_t result;
300
+
301
+ __ASM volatile ("MRS %0, psp_ns" : "=r" (result) );
302
+ return(result);
303
+ }
304
+ #endif
305
+
306
+
307
+ /**
308
+ \brief Set Process Stack Pointer
309
+ \details Assigns the given value to the Process Stack Pointer (PSP).
310
+ \param [in] topOfProcStack Process Stack Pointer value to set
311
+ */
312
+ __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
313
+ {
314
+ __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
315
+ }
316
+
317
+
318
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
319
+ /**
320
+ \brief Set Process Stack Pointer (non-secure)
321
+ \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
322
+ \param [in] topOfProcStack Process Stack Pointer value to set
323
+ */
324
+ __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
325
+ {
326
+ __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
327
+ }
328
+ #endif
329
+
330
+
331
+ /**
332
+ \brief Get Main Stack Pointer
333
+ \details Returns the current value of the Main Stack Pointer (MSP).
334
+ \return MSP Register value
335
+ */
336
+ __STATIC_FORCEINLINE uint32_t __get_MSP(void)
337
+ {
338
+ uint32_t result;
339
+
340
+ __ASM volatile ("MRS %0, msp" : "=r" (result) );
341
+ return(result);
342
+ }
343
+
344
+
345
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
346
+ /**
347
+ \brief Get Main Stack Pointer (non-secure)
348
+ \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
349
+ \return MSP Register value
350
+ */
351
+ __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
352
+ {
353
+ uint32_t result;
354
+
355
+ __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
356
+ return(result);
357
+ }
358
+ #endif
359
+
360
+
361
+ /**
362
+ \brief Set Main Stack Pointer
363
+ \details Assigns the given value to the Main Stack Pointer (MSP).
364
+ \param [in] topOfMainStack Main Stack Pointer value to set
365
+ */
366
+ __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
367
+ {
368
+ __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
369
+ }
370
+
371
+
372
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
373
+ /**
374
+ \brief Set Main Stack Pointer (non-secure)
375
+ \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
376
+ \param [in] topOfMainStack Main Stack Pointer value to set
377
+ */
378
+ __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
379
+ {
380
+ __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
381
+ }
382
+ #endif
383
+
384
+
385
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
386
+ /**
387
+ \brief Get Stack Pointer (non-secure)
388
+ \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
389
+ \return SP Register value
390
+ */
391
+ __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
392
+ {
393
+ uint32_t result;
394
+
395
+ __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
396
+ return(result);
397
+ }
398
+
399
+
400
+ /**
401
+ \brief Set Stack Pointer (non-secure)
402
+ \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
403
+ \param [in] topOfStack Stack Pointer value to set
404
+ */
405
+ __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
406
+ {
407
+ __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
408
+ }
409
+ #endif
410
+
411
+
412
+ /**
413
+ \brief Get Priority Mask
414
+ \details Returns the current state of the priority mask bit from the Priority Mask Register.
415
+ \return Priority Mask value
416
+ */
417
+ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
418
+ {
419
+ uint32_t result;
420
+
421
+ __ASM volatile ("MRS %0, primask" : "=r" (result) );
422
+ return(result);
423
+ }
424
+
425
+
426
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
427
+ /**
428
+ \brief Get Priority Mask (non-secure)
429
+ \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
430
+ \return Priority Mask value
431
+ */
432
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
433
+ {
434
+ uint32_t result;
435
+
436
+ __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
437
+ return(result);
438
+ }
439
+ #endif
440
+
441
+
442
+ /**
443
+ \brief Set Priority Mask
444
+ \details Assigns the given value to the Priority Mask Register.
445
+ \param [in] priMask Priority Mask
446
+ */
447
+ __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
448
+ {
449
+ __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
450
+ }
451
+
452
+
453
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
454
+ /**
455
+ \brief Set Priority Mask (non-secure)
456
+ \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
457
+ \param [in] priMask Priority Mask
458
+ */
459
+ __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
460
+ {
461
+ __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
462
+ }
463
+ #endif
464
+
465
+
466
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
467
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
468
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
469
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) )
470
+ /**
471
+ \brief Enable FIQ
472
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
473
+ Can only be executed in Privileged modes.
474
+ */
475
+ #define __enable_fault_irq __enable_fiq /* see arm_compat.h */
476
+
477
+
478
+ /**
479
+ \brief Disable FIQ
480
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
481
+ Can only be executed in Privileged modes.
482
+ */
483
+ #define __disable_fault_irq __disable_fiq /* see arm_compat.h */
484
+
485
+
486
+ /**
487
+ \brief Get Base Priority
488
+ \details Returns the current value of the Base Priority register.
489
+ \return Base Priority register value
490
+ */
491
+ __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
492
+ {
493
+ uint32_t result;
494
+
495
+ __ASM volatile ("MRS %0, basepri" : "=r" (result) );
496
+ return(result);
497
+ }
498
+
499
+
500
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
501
+ /**
502
+ \brief Get Base Priority (non-secure)
503
+ \details Returns the current value of the non-secure Base Priority register when in secure state.
504
+ \return Base Priority register value
505
+ */
506
+ __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
507
+ {
508
+ uint32_t result;
509
+
510
+ __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
511
+ return(result);
512
+ }
513
+ #endif
514
+
515
+
516
+ /**
517
+ \brief Set Base Priority
518
+ \details Assigns the given value to the Base Priority register.
519
+ \param [in] basePri Base Priority value to set
520
+ */
521
+ __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
522
+ {
523
+ __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
524
+ }
525
+
526
+
527
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
528
+ /**
529
+ \brief Set Base Priority (non-secure)
530
+ \details Assigns the given value to the non-secure Base Priority register when in secure state.
531
+ \param [in] basePri Base Priority value to set
532
+ */
533
+ __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
534
+ {
535
+ __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
536
+ }
537
+ #endif
538
+
539
+
540
+ /**
541
+ \brief Set Base Priority with condition
542
+ \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
543
+ or the new value increases the BASEPRI priority level.
544
+ \param [in] basePri Base Priority value to set
545
+ */
546
+ __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
547
+ {
548
+ __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
549
+ }
550
+
551
+
552
+ /**
553
+ \brief Get Fault Mask
554
+ \details Returns the current value of the Fault Mask register.
555
+ \return Fault Mask register value
556
+ */
557
+ __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
558
+ {
559
+ uint32_t result;
560
+
561
+ __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
562
+ return(result);
563
+ }
564
+
565
+
566
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
567
+ /**
568
+ \brief Get Fault Mask (non-secure)
569
+ \details Returns the current value of the non-secure Fault Mask register when in secure state.
570
+ \return Fault Mask register value
571
+ */
572
+ __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
573
+ {
574
+ uint32_t result;
575
+
576
+ __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
577
+ return(result);
578
+ }
579
+ #endif
580
+
581
+
582
+ /**
583
+ \brief Set Fault Mask
584
+ \details Assigns the given value to the Fault Mask register.
585
+ \param [in] faultMask Fault Mask value to set
586
+ */
587
+ __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
588
+ {
589
+ __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
590
+ }
591
+
592
+
593
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
594
+ /**
595
+ \brief Set Fault Mask (non-secure)
596
+ \details Assigns the given value to the non-secure Fault Mask register when in secure state.
597
+ \param [in] faultMask Fault Mask value to set
598
+ */
599
+ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
600
+ {
601
+ __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
602
+ }
603
+ #endif
604
+
605
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
606
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
607
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
608
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */
609
+
610
+
611
+ #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
612
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \
613
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) )
614
+
615
+ /**
616
+ \brief Get Process Stack Pointer Limit
617
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
618
+ Stack Pointer Limit register hence zero is returned always in non-secure
619
+ mode.
620
+
621
+ \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
622
+ \return PSPLIM Register value
623
+ */
624
+ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
625
+ {
626
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
627
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \
628
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
629
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
630
+ return 0U;
631
+ #else
632
+ uint32_t result;
633
+ __ASM volatile ("MRS %0, psplim" : "=r" (result) );
634
+ return result;
635
+ #endif
636
+ }
637
+
638
+ #if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
639
+ /**
640
+ \brief Get Process Stack Pointer Limit (non-secure)
641
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
642
+ Stack Pointer Limit register hence zero is returned always in non-secure
643
+ mode.
644
+
645
+ \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
646
+ \return PSPLIM Register value
647
+ */
648
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
649
+ {
650
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
651
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) )
652
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
653
+ return 0U;
654
+ #else
655
+ uint32_t result;
656
+ __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );
657
+ return result;
658
+ #endif
659
+ }
660
+ #endif
661
+
662
+
663
+ /**
664
+ \brief Set Process Stack Pointer Limit
665
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
666
+ Stack Pointer Limit register hence the write is silently ignored in non-secure
667
+ mode.
668
+
669
+ \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
670
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
671
+ */
672
+ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
673
+ {
674
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
675
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \
676
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
677
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
678
+ (void)ProcStackPtrLimit;
679
+ #else
680
+ __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
681
+ #endif
682
+ }
683
+
684
+
685
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
686
+ /**
687
+ \brief Set Process Stack Pointer (non-secure)
688
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
689
+ Stack Pointer Limit register hence the write is silently ignored in non-secure
690
+ mode.
691
+
692
+ \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
693
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
694
+ */
695
+ __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
696
+ {
697
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
698
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) )
699
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
700
+ (void)ProcStackPtrLimit;
701
+ #else
702
+ __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
703
+ #endif
704
+ }
705
+ #endif
706
+
707
+
708
+ /**
709
+ \brief Get Main Stack Pointer Limit
710
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
711
+ Stack Pointer Limit register hence zero is returned always.
712
+
713
+ \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
714
+ \return MSPLIM Register value
715
+ */
716
+ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
717
+ {
718
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
719
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \
720
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
721
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
722
+ return 0U;
723
+ #else
724
+ uint32_t result;
725
+ __ASM volatile ("MRS %0, msplim" : "=r" (result) );
726
+ return result;
727
+ #endif
728
+ }
729
+
730
+
731
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
732
+ /**
733
+ \brief Get Main Stack Pointer Limit (non-secure)
734
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
735
+ Stack Pointer Limit register hence zero is returned always.
736
+
737
+ \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state.
738
+ \return MSPLIM Register value
739
+ */
740
+ __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
741
+ {
742
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
743
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) )
744
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
745
+ return 0U;
746
+ #else
747
+ uint32_t result;
748
+ __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
749
+ return result;
750
+ #endif
751
+ }
752
+ #endif
753
+
754
+
755
+ /**
756
+ \brief Set Main Stack Pointer Limit
757
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
758
+ Stack Pointer Limit register hence the write is silently ignored.
759
+
760
+ \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
761
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set
762
+ */
763
+ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
764
+ {
765
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
766
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \
767
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
768
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
769
+ (void)MainStackPtrLimit;
770
+ #else
771
+ __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
772
+ #endif
773
+ }
774
+
775
+
776
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
777
+ /**
778
+ \brief Set Main Stack Pointer Limit (non-secure)
779
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
780
+ Stack Pointer Limit register hence the write is silently ignored.
781
+
782
+ \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
783
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set
784
+ */
785
+ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
786
+ {
787
+ #if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
788
+ (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) )
789
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
790
+ (void)MainStackPtrLimit;
791
+ #else
792
+ __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
793
+ #endif
794
+ }
795
+ #endif
796
+
797
+ #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
798
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \
799
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */
800
+
801
+ /**
802
+ \brief Get FPSCR
803
+ \details Returns the current value of the Floating Point Status/Control register.
804
+ \return Floating Point Status/Control register value
805
+ */
806
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
807
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
808
+ #define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr
809
+ #else
810
+ #define __get_FPSCR() ((uint32_t)0U)
811
+ #endif
812
+
813
+ /**
814
+ \brief Set FPSCR
815
+ \details Assigns the given value to the Floating Point Status/Control register.
816
+ \param [in] fpscr Floating Point Status/Control value to set
817
+ */
818
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
819
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
820
+ #define __set_FPSCR __builtin_arm_set_fpscr
821
+ #else
822
+ #define __set_FPSCR(x) ((void)(x))
823
+ #endif
824
+
825
+
826
+ /*@} end of CMSIS_Core_RegAccFunctions */
827
+
828
+
829
+ /* ########################## Core Instruction Access ######################### */
830
+ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
831
+ Access to dedicated instructions
832
+ @{
833
+ */
834
+
835
+ /* Define macros for porting to both thumb1 and thumb2.
836
+ * For thumb1, use low register (r0-r7), specified by constraint "l"
837
+ * Otherwise, use general registers, specified by constraint "r" */
838
+ #if defined (__thumb__) && !defined (__thumb2__)
839
+ #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
840
+ #define __CMSIS_GCC_RW_REG(r) "+l" (r)
841
+ #define __CMSIS_GCC_USE_REG(r) "l" (r)
842
+ #else
843
+ #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
844
+ #define __CMSIS_GCC_RW_REG(r) "+r" (r)
845
+ #define __CMSIS_GCC_USE_REG(r) "r" (r)
846
+ #endif
847
+
848
+ /**
849
+ \brief No Operation
850
+ \details No Operation does nothing. This instruction can be used for code alignment purposes.
851
+ */
852
+ #define __NOP __builtin_arm_nop
853
+
854
+ /**
855
+ \brief Wait For Interrupt
856
+ \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
857
+ */
858
+ #define __WFI __builtin_arm_wfi
859
+
860
+
861
+ /**
862
+ \brief Wait For Event
863
+ \details Wait For Event is a hint instruction that permits the processor to enter
864
+ a low-power state until one of a number of events occurs.
865
+ */
866
+ #define __WFE __builtin_arm_wfe
867
+
868
+
869
+ /**
870
+ \brief Send Event
871
+ \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
872
+ */
873
+ #define __SEV __builtin_arm_sev
874
+
875
+
876
+ /**
877
+ \brief Instruction Synchronization Barrier
878
+ \details Instruction Synchronization Barrier flushes the pipeline in the processor,
879
+ so that all instructions following the ISB are fetched from cache or memory,
880
+ after the instruction has been completed.
881
+ */
882
+ #define __ISB() __builtin_arm_isb(0xF)
883
+
884
+ /**
885
+ \brief Data Synchronization Barrier
886
+ \details Acts as a special kind of Data Memory Barrier.
887
+ It completes when all explicit memory accesses before this instruction complete.
888
+ */
889
+ #define __DSB() __builtin_arm_dsb(0xF)
890
+
891
+
892
+ /**
893
+ \brief Data Memory Barrier
894
+ \details Ensures the apparent order of the explicit memory operations before
895
+ and after the instruction, without ensuring their completion.
896
+ */
897
+ #define __DMB() __builtin_arm_dmb(0xF)
898
+
899
+
900
+ /**
901
+ \brief Reverse byte order (32 bit)
902
+ \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
903
+ \param [in] value Value to reverse
904
+ \return Reversed value
905
+ */
906
+ #define __REV(value) __builtin_bswap32(value)
907
+
908
+
909
+ /**
910
+ \brief Reverse byte order (16 bit)
911
+ \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
912
+ \param [in] value Value to reverse
913
+ \return Reversed value
914
+ */
915
+ #define __REV16(value) __ROR(__REV(value), 16)
916
+
917
+
918
+ /**
919
+ \brief Reverse byte order (16 bit)
920
+ \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
921
+ \param [in] value Value to reverse
922
+ \return Reversed value
923
+ */
924
+ #define __REVSH(value) (int16_t)__builtin_bswap16(value)
925
+
926
+
927
+ /**
928
+ \brief Rotate Right in unsigned value (32 bit)
929
+ \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
930
+ \param [in] op1 Value to rotate
931
+ \param [in] op2 Number of Bits to rotate
932
+ \return Rotated value
933
+ */
934
+ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
935
+ {
936
+ op2 %= 32U;
937
+ if (op2 == 0U)
938
+ {
939
+ return op1;
940
+ }
941
+ return (op1 >> op2) | (op1 << (32U - op2));
942
+ }
943
+
944
+
945
+ /**
946
+ \brief Breakpoint
947
+ \details Causes the processor to enter Debug state.
948
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
949
+ \param [in] value is ignored by the processor.
950
+ If required, a debugger can use it to store additional information about the breakpoint.
951
+ */
952
+ #define __BKPT(value) __ASM volatile ("bkpt "#value)
953
+
954
+
955
+ /**
956
+ \brief Reverse bit order of value
957
+ \details Reverses the bit order of the given value.
958
+ \param [in] value Value to reverse
959
+ \return Reversed value
960
+ */
961
+ #define __RBIT __builtin_arm_rbit
962
+
963
+ /**
964
+ \brief Count leading zeros
965
+ \details Counts the number of leading zeros of a data value.
966
+ \param [in] value Value to count the leading zeros
967
+ \return number of leading zeros in value
968
+ */
969
+ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
970
+ {
971
+ /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
972
+ __builtin_clz(0) is undefined behaviour, so handle this case specially.
973
+ This guarantees ARM-compatible results if happening to compile on a non-ARM
974
+ target, and ensures the compiler doesn't decide to activate any
975
+ optimisations using the logic "value was passed to __builtin_clz, so it
976
+ is non-zero".
977
+ ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
978
+ single CLZ instruction.
979
+ */
980
+ if (value == 0U)
981
+ {
982
+ return 32U;
983
+ }
984
+ return __builtin_clz(value);
985
+ }
986
+
987
+
988
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
989
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
990
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
991
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \
992
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) )
993
+
994
+ /**
995
+ \brief LDR Exclusive (8 bit)
996
+ \details Executes a exclusive LDR instruction for 8 bit value.
997
+ \param [in] ptr Pointer to data
998
+ \return value of type uint8_t at (*ptr)
999
+ */
1000
+ #define __LDREXB (uint8_t)__builtin_arm_ldrex
1001
+
1002
+
1003
+ /**
1004
+ \brief LDR Exclusive (16 bit)
1005
+ \details Executes a exclusive LDR instruction for 16 bit values.
1006
+ \param [in] ptr Pointer to data
1007
+ \return value of type uint16_t at (*ptr)
1008
+ */
1009
+ #define __LDREXH (uint16_t)__builtin_arm_ldrex
1010
+
1011
+
1012
+ /**
1013
+ \brief LDR Exclusive (32 bit)
1014
+ \details Executes a exclusive LDR instruction for 32 bit values.
1015
+ \param [in] ptr Pointer to data
1016
+ \return value of type uint32_t at (*ptr)
1017
+ */
1018
+ #define __LDREXW (uint32_t)__builtin_arm_ldrex
1019
+
1020
+
1021
+ /**
1022
+ \brief STR Exclusive (8 bit)
1023
+ \details Executes a exclusive STR instruction for 8 bit values.
1024
+ \param [in] value Value to store
1025
+ \param [in] ptr Pointer to location
1026
+ \return 0 Function succeeded
1027
+ \return 1 Function failed
1028
+ */
1029
+ #define __STREXB (uint32_t)__builtin_arm_strex
1030
+
1031
+
1032
+ /**
1033
+ \brief STR Exclusive (16 bit)
1034
+ \details Executes a exclusive STR instruction for 16 bit values.
1035
+ \param [in] value Value to store
1036
+ \param [in] ptr Pointer to location
1037
+ \return 0 Function succeeded
1038
+ \return 1 Function failed
1039
+ */
1040
+ #define __STREXH (uint32_t)__builtin_arm_strex
1041
+
1042
+
1043
+ /**
1044
+ \brief STR Exclusive (32 bit)
1045
+ \details Executes a exclusive STR instruction for 32 bit values.
1046
+ \param [in] value Value to store
1047
+ \param [in] ptr Pointer to location
1048
+ \return 0 Function succeeded
1049
+ \return 1 Function failed
1050
+ */
1051
+ #define __STREXW (uint32_t)__builtin_arm_strex
1052
+
1053
+
1054
+ /**
1055
+ \brief Remove the exclusive lock
1056
+ \details Removes the exclusive lock which is created by LDREX.
1057
+ */
1058
+ #define __CLREX __builtin_arm_clrex
1059
+
1060
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1061
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1062
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1063
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \
1064
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */
1065
+
1066
+
1067
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1068
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1069
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1070
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) )
1071
+
1072
+ /**
1073
+ \brief Signed Saturate
1074
+ \details Saturates a signed value.
1075
+ \param [in] value Value to be saturated
1076
+ \param [in] sat Bit position to saturate to (1..32)
1077
+ \return Saturated value
1078
+ */
1079
+ #define __SSAT __builtin_arm_ssat
1080
+
1081
+
1082
+ /**
1083
+ \brief Unsigned Saturate
1084
+ \details Saturates an unsigned value.
1085
+ \param [in] value Value to be saturated
1086
+ \param [in] sat Bit position to saturate to (0..31)
1087
+ \return Saturated value
1088
+ */
1089
+ #define __USAT __builtin_arm_usat
1090
+
1091
+
1092
+ /**
1093
+ \brief Rotate Right with Extend (32 bit)
1094
+ \details Moves each bit of a bitstring right by one bit.
1095
+ The carry input is shifted in at the left end of the bitstring.
1096
+ \param [in] value Value to rotate
1097
+ \return Rotated value
1098
+ */
1099
+ __STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
1100
+ {
1101
+ uint32_t result;
1102
+
1103
+ __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1104
+ return(result);
1105
+ }
1106
+
1107
+
1108
+ /**
1109
+ \brief LDRT Unprivileged (8 bit)
1110
+ \details Executes a Unprivileged LDRT instruction for 8 bit value.
1111
+ \param [in] ptr Pointer to data
1112
+ \return value of type uint8_t at (*ptr)
1113
+ */
1114
+ __STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
1115
+ {
1116
+ uint32_t result;
1117
+
1118
+ __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
1119
+ return ((uint8_t) result); /* Add explicit type cast here */
1120
+ }
1121
+
1122
+
1123
+ /**
1124
+ \brief LDRT Unprivileged (16 bit)
1125
+ \details Executes a Unprivileged LDRT instruction for 16 bit values.
1126
+ \param [in] ptr Pointer to data
1127
+ \return value of type uint16_t at (*ptr)
1128
+ */
1129
+ __STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
1130
+ {
1131
+ uint32_t result;
1132
+
1133
+ __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
1134
+ return ((uint16_t) result); /* Add explicit type cast here */
1135
+ }
1136
+
1137
+
1138
+ /**
1139
+ \brief LDRT Unprivileged (32 bit)
1140
+ \details Executes a Unprivileged LDRT instruction for 32 bit values.
1141
+ \param [in] ptr Pointer to data
1142
+ \return value of type uint32_t at (*ptr)
1143
+ */
1144
+ __STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
1145
+ {
1146
+ uint32_t result;
1147
+
1148
+ __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
1149
+ return(result);
1150
+ }
1151
+
1152
+
1153
+ /**
1154
+ \brief STRT Unprivileged (8 bit)
1155
+ \details Executes a Unprivileged STRT instruction for 8 bit values.
1156
+ \param [in] value Value to store
1157
+ \param [in] ptr Pointer to location
1158
+ */
1159
+ __STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
1160
+ {
1161
+ __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1162
+ }
1163
+
1164
+
1165
+ /**
1166
+ \brief STRT Unprivileged (16 bit)
1167
+ \details Executes a Unprivileged STRT instruction for 16 bit values.
1168
+ \param [in] value Value to store
1169
+ \param [in] ptr Pointer to location
1170
+ */
1171
+ __STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
1172
+ {
1173
+ __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1174
+ }
1175
+
1176
+
1177
+ /**
1178
+ \brief STRT Unprivileged (32 bit)
1179
+ \details Executes a Unprivileged STRT instruction for 32 bit values.
1180
+ \param [in] value Value to store
1181
+ \param [in] ptr Pointer to location
1182
+ */
1183
+ __STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
1184
+ {
1185
+ __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
1186
+ }
1187
+
1188
+ #else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1189
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1190
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1191
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */
1192
+
1193
+ /**
1194
+ \brief Signed Saturate
1195
+ \details Saturates a signed value.
1196
+ \param [in] value Value to be saturated
1197
+ \param [in] sat Bit position to saturate to (1..32)
1198
+ \return Saturated value
1199
+ */
1200
+ __STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
1201
+ {
1202
+ if ((sat >= 1U) && (sat <= 32U))
1203
+ {
1204
+ const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
1205
+ const int32_t min = -1 - max ;
1206
+ if (val > max)
1207
+ {
1208
+ return max;
1209
+ }
1210
+ else if (val < min)
1211
+ {
1212
+ return min;
1213
+ }
1214
+ }
1215
+ return val;
1216
+ }
1217
+
1218
+ /**
1219
+ \brief Unsigned Saturate
1220
+ \details Saturates an unsigned value.
1221
+ \param [in] value Value to be saturated
1222
+ \param [in] sat Bit position to saturate to (0..31)
1223
+ \return Saturated value
1224
+ */
1225
+ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
1226
+ {
1227
+ if (sat <= 31U)
1228
+ {
1229
+ const uint32_t max = ((1U << sat) - 1U);
1230
+ if (val > (int32_t)max)
1231
+ {
1232
+ return max;
1233
+ }
1234
+ else if (val < 0)
1235
+ {
1236
+ return 0U;
1237
+ }
1238
+ }
1239
+ return (uint32_t)val;
1240
+ }
1241
+
1242
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1243
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1244
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1245
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */
1246
+
1247
+
1248
+ #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1249
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \
1250
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) )
1251
+
1252
+ /**
1253
+ \brief Load-Acquire (8 bit)
1254
+ \details Executes a LDAB instruction for 8 bit value.
1255
+ \param [in] ptr Pointer to data
1256
+ \return value of type uint8_t at (*ptr)
1257
+ */
1258
+ __STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
1259
+ {
1260
+ uint32_t result;
1261
+
1262
+ __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1263
+ return ((uint8_t) result);
1264
+ }
1265
+
1266
+
1267
+ /**
1268
+ \brief Load-Acquire (16 bit)
1269
+ \details Executes a LDAH instruction for 16 bit values.
1270
+ \param [in] ptr Pointer to data
1271
+ \return value of type uint16_t at (*ptr)
1272
+ */
1273
+ __STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
1274
+ {
1275
+ uint32_t result;
1276
+
1277
+ __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1278
+ return ((uint16_t) result);
1279
+ }
1280
+
1281
+
1282
+ /**
1283
+ \brief Load-Acquire (32 bit)
1284
+ \details Executes a LDA instruction for 32 bit values.
1285
+ \param [in] ptr Pointer to data
1286
+ \return value of type uint32_t at (*ptr)
1287
+ */
1288
+ __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
1289
+ {
1290
+ uint32_t result;
1291
+
1292
+ __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1293
+ return(result);
1294
+ }
1295
+
1296
+
1297
+ /**
1298
+ \brief Store-Release (8 bit)
1299
+ \details Executes a STLB instruction for 8 bit values.
1300
+ \param [in] value Value to store
1301
+ \param [in] ptr Pointer to location
1302
+ */
1303
+ __STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
1304
+ {
1305
+ __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1306
+ }
1307
+
1308
+
1309
+ /**
1310
+ \brief Store-Release (16 bit)
1311
+ \details Executes a STLH instruction for 16 bit values.
1312
+ \param [in] value Value to store
1313
+ \param [in] ptr Pointer to location
1314
+ */
1315
+ __STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
1316
+ {
1317
+ __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1318
+ }
1319
+
1320
+
1321
+ /**
1322
+ \brief Store-Release (32 bit)
1323
+ \details Executes a STL instruction for 32 bit values.
1324
+ \param [in] value Value to store
1325
+ \param [in] ptr Pointer to location
1326
+ */
1327
+ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
1328
+ {
1329
+ __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1330
+ }
1331
+
1332
+
1333
+ /**
1334
+ \brief Load-Acquire Exclusive (8 bit)
1335
+ \details Executes a LDAB exclusive instruction for 8 bit value.
1336
+ \param [in] ptr Pointer to data
1337
+ \return value of type uint8_t at (*ptr)
1338
+ */
1339
+ #define __LDAEXB (uint8_t)__builtin_arm_ldaex
1340
+
1341
+
1342
+ /**
1343
+ \brief Load-Acquire Exclusive (16 bit)
1344
+ \details Executes a LDAH exclusive instruction for 16 bit values.
1345
+ \param [in] ptr Pointer to data
1346
+ \return value of type uint16_t at (*ptr)
1347
+ */
1348
+ #define __LDAEXH (uint16_t)__builtin_arm_ldaex
1349
+
1350
+
1351
+ /**
1352
+ \brief Load-Acquire Exclusive (32 bit)
1353
+ \details Executes a LDA exclusive instruction for 32 bit values.
1354
+ \param [in] ptr Pointer to data
1355
+ \return value of type uint32_t at (*ptr)
1356
+ */
1357
+ #define __LDAEX (uint32_t)__builtin_arm_ldaex
1358
+
1359
+
1360
+ /**
1361
+ \brief Store-Release Exclusive (8 bit)
1362
+ \details Executes a STLB exclusive instruction for 8 bit values.
1363
+ \param [in] value Value to store
1364
+ \param [in] ptr Pointer to location
1365
+ \return 0 Function succeeded
1366
+ \return 1 Function failed
1367
+ */
1368
+ #define __STLEXB (uint32_t)__builtin_arm_stlex
1369
+
1370
+
1371
+ /**
1372
+ \brief Store-Release Exclusive (16 bit)
1373
+ \details Executes a STLH exclusive instruction for 16 bit values.
1374
+ \param [in] value Value to store
1375
+ \param [in] ptr Pointer to location
1376
+ \return 0 Function succeeded
1377
+ \return 1 Function failed
1378
+ */
1379
+ #define __STLEXH (uint32_t)__builtin_arm_stlex
1380
+
1381
+
1382
+ /**
1383
+ \brief Store-Release Exclusive (32 bit)
1384
+ \details Executes a STL exclusive instruction for 32 bit values.
1385
+ \param [in] value Value to store
1386
+ \param [in] ptr Pointer to location
1387
+ \return 0 Function succeeded
1388
+ \return 1 Function failed
1389
+ */
1390
+ #define __STLEX (uint32_t)__builtin_arm_stlex
1391
+
1392
+ #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1393
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \
1394
+ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */
1395
+
1396
+ /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
1397
+
1398
+
1399
+ /* ################### Compiler specific Intrinsics ########################### */
1400
+ /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
1401
+ Access to dedicated SIMD instructions
1402
+ @{
1403
+ */
1404
+
1405
+ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
1406
+
1407
+ #define __SADD8 __builtin_arm_sadd8
1408
+ #define __QADD8 __builtin_arm_qadd8
1409
+ #define __SHADD8 __builtin_arm_shadd8
1410
+ #define __UADD8 __builtin_arm_uadd8
1411
+ #define __UQADD8 __builtin_arm_uqadd8
1412
+ #define __UHADD8 __builtin_arm_uhadd8
1413
+ #define __SSUB8 __builtin_arm_ssub8
1414
+ #define __QSUB8 __builtin_arm_qsub8
1415
+ #define __SHSUB8 __builtin_arm_shsub8
1416
+ #define __USUB8 __builtin_arm_usub8
1417
+ #define __UQSUB8 __builtin_arm_uqsub8
1418
+ #define __UHSUB8 __builtin_arm_uhsub8
1419
+ #define __SADD16 __builtin_arm_sadd16
1420
+ #define __QADD16 __builtin_arm_qadd16
1421
+ #define __SHADD16 __builtin_arm_shadd16
1422
+ #define __UADD16 __builtin_arm_uadd16
1423
+ #define __UQADD16 __builtin_arm_uqadd16
1424
+ #define __UHADD16 __builtin_arm_uhadd16
1425
+ #define __SSUB16 __builtin_arm_ssub16
1426
+ #define __QSUB16 __builtin_arm_qsub16
1427
+ #define __SHSUB16 __builtin_arm_shsub16
1428
+ #define __USUB16 __builtin_arm_usub16
1429
+ #define __UQSUB16 __builtin_arm_uqsub16
1430
+ #define __UHSUB16 __builtin_arm_uhsub16
1431
+ #define __SASX __builtin_arm_sasx
1432
+ #define __QASX __builtin_arm_qasx
1433
+ #define __SHASX __builtin_arm_shasx
1434
+ #define __UASX __builtin_arm_uasx
1435
+ #define __UQASX __builtin_arm_uqasx
1436
+ #define __UHASX __builtin_arm_uhasx
1437
+ #define __SSAX __builtin_arm_ssax
1438
+ #define __QSAX __builtin_arm_qsax
1439
+ #define __SHSAX __builtin_arm_shsax
1440
+ #define __USAX __builtin_arm_usax
1441
+ #define __UQSAX __builtin_arm_uqsax
1442
+ #define __UHSAX __builtin_arm_uhsax
1443
+ #define __USAD8 __builtin_arm_usad8
1444
+ #define __USADA8 __builtin_arm_usada8
1445
+ #define __SSAT16 __builtin_arm_ssat16
1446
+ #define __USAT16 __builtin_arm_usat16
1447
+ #define __UXTB16 __builtin_arm_uxtb16
1448
+ #define __UXTAB16 __builtin_arm_uxtab16
1449
+ #define __SXTB16 __builtin_arm_sxtb16
1450
+ #define __SXTAB16 __builtin_arm_sxtab16
1451
+ #define __SMUAD __builtin_arm_smuad
1452
+ #define __SMUADX __builtin_arm_smuadx
1453
+ #define __SMLAD __builtin_arm_smlad
1454
+ #define __SMLADX __builtin_arm_smladx
1455
+ #define __SMLALD __builtin_arm_smlald
1456
+ #define __SMLALDX __builtin_arm_smlaldx
1457
+ #define __SMUSD __builtin_arm_smusd
1458
+ #define __SMUSDX __builtin_arm_smusdx
1459
+ #define __SMLSD __builtin_arm_smlsd
1460
+ #define __SMLSDX __builtin_arm_smlsdx
1461
+ #define __SMLSLD __builtin_arm_smlsld
1462
+ #define __SMLSLDX __builtin_arm_smlsldx
1463
+ #define __SEL __builtin_arm_sel
1464
+ #define __QADD __builtin_arm_qadd
1465
+ #define __QSUB __builtin_arm_qsub
1466
+
1467
+ #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
1468
+ ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
1469
+
1470
+ #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
1471
+ ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
1472
+
1473
+ #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
1474
+
1475
+ #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
1476
+
1477
+ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1478
+ {
1479
+ int32_t result;
1480
+
1481
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
1482
+ return(result);
1483
+ }
1484
+
1485
+ #endif /* (__ARM_FEATURE_DSP == 1) */
1486
+ /*@} end of group CMSIS_SIMD_intrinsics */
1487
+
1488
+
1489
+ #endif /* __CMSIS_ARMCLANG_H */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang_ltm.h ADDED
@@ -0,0 +1,1914 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_armclang_ltm.h
3
+ * @brief CMSIS compiler armclang (Arm Compiler 6) header file
4
+ * @version V1.5.0
5
+ * @date 19. February 2021
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2018-2021 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ /*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */
26
+
27
+ #ifndef __CMSIS_ARMCLANG_H
28
+ #define __CMSIS_ARMCLANG_H
29
+
30
+ #pragma clang system_header /* treat file as system include file */
31
+
32
+ #ifndef __ARM_COMPAT_H
33
+ #include <arm_compat.h> /* Compatibility header for Arm Compiler 5 intrinsics */
34
+ #endif
35
+
36
+ /* CMSIS compiler specific defines */
37
+ #ifndef __ASM
38
+ #define __ASM __asm
39
+ #endif
40
+ #ifndef __INLINE
41
+ #define __INLINE __inline
42
+ #endif
43
+ #ifndef __STATIC_INLINE
44
+ #define __STATIC_INLINE static __inline
45
+ #endif
46
+ #ifndef __STATIC_FORCEINLINE
47
+ #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline
48
+ #endif
49
+ #ifndef __NO_RETURN
50
+ #define __NO_RETURN __attribute__((__noreturn__))
51
+ #endif
52
+ #ifndef __USED
53
+ #define __USED __attribute__((used))
54
+ #endif
55
+ #ifndef __WEAK
56
+ #define __WEAK __attribute__((weak))
57
+ #endif
58
+ #ifndef __PACKED
59
+ #define __PACKED __attribute__((packed, aligned(1)))
60
+ #endif
61
+ #ifndef __PACKED_STRUCT
62
+ #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
63
+ #endif
64
+ #ifndef __PACKED_UNION
65
+ #define __PACKED_UNION union __attribute__((packed, aligned(1)))
66
+ #endif
67
+ #ifndef __UNALIGNED_UINT32 /* deprecated */
68
+ #pragma clang diagnostic push
69
+ #pragma clang diagnostic ignored "-Wpacked"
70
+ /*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */
71
+ struct __attribute__((packed)) T_UINT32 { uint32_t v; };
72
+ #pragma clang diagnostic pop
73
+ #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
74
+ #endif
75
+ #ifndef __UNALIGNED_UINT16_WRITE
76
+ #pragma clang diagnostic push
77
+ #pragma clang diagnostic ignored "-Wpacked"
78
+ /*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
79
+ __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
80
+ #pragma clang diagnostic pop
81
+ #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
82
+ #endif
83
+ #ifndef __UNALIGNED_UINT16_READ
84
+ #pragma clang diagnostic push
85
+ #pragma clang diagnostic ignored "-Wpacked"
86
+ /*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
87
+ __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
88
+ #pragma clang diagnostic pop
89
+ #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
90
+ #endif
91
+ #ifndef __UNALIGNED_UINT32_WRITE
92
+ #pragma clang diagnostic push
93
+ #pragma clang diagnostic ignored "-Wpacked"
94
+ /*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
95
+ __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
96
+ #pragma clang diagnostic pop
97
+ #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
98
+ #endif
99
+ #ifndef __UNALIGNED_UINT32_READ
100
+ #pragma clang diagnostic push
101
+ #pragma clang diagnostic ignored "-Wpacked"
102
+ /*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */
103
+ __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
104
+ #pragma clang diagnostic pop
105
+ #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
106
+ #endif
107
+ #ifndef __ALIGNED
108
+ #define __ALIGNED(x) __attribute__((aligned(x)))
109
+ #endif
110
+ #ifndef __RESTRICT
111
+ #define __RESTRICT __restrict
112
+ #endif
113
+ #ifndef __COMPILER_BARRIER
114
+ #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
115
+ #endif
116
+
117
+ /* ######################### Startup and Lowlevel Init ######################## */
118
+
119
+ #ifndef __PROGRAM_START
120
+ #define __PROGRAM_START __main
121
+ #endif
122
+
123
+ #ifndef __INITIAL_SP
124
+ #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit
125
+ #endif
126
+
127
+ #ifndef __STACK_LIMIT
128
+ #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base
129
+ #endif
130
+
131
+ #ifndef __VECTOR_TABLE
132
+ #define __VECTOR_TABLE __Vectors
133
+ #endif
134
+
135
+ #ifndef __VECTOR_TABLE_ATTRIBUTE
136
+ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET")))
137
+ #endif
138
+
139
+ #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
140
+ #ifndef __STACK_SEAL
141
+ #define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base
142
+ #endif
143
+
144
+ #ifndef __TZ_STACK_SEAL_SIZE
145
+ #define __TZ_STACK_SEAL_SIZE 8U
146
+ #endif
147
+
148
+ #ifndef __TZ_STACK_SEAL_VALUE
149
+ #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL
150
+ #endif
151
+
152
+
153
+ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) {
154
+ *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE;
155
+ }
156
+ #endif
157
+
158
+
159
+ /* ########################### Core Function Access ########################### */
160
+ /** \ingroup CMSIS_Core_FunctionInterface
161
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
162
+ @{
163
+ */
164
+
165
+ /**
166
+ \brief Enable IRQ Interrupts
167
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
168
+ Can only be executed in Privileged modes.
169
+ */
170
+ /* intrinsic void __enable_irq(); see arm_compat.h */
171
+
172
+
173
+ /**
174
+ \brief Disable IRQ Interrupts
175
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
176
+ Can only be executed in Privileged modes.
177
+ */
178
+ /* intrinsic void __disable_irq(); see arm_compat.h */
179
+
180
+
181
+ /**
182
+ \brief Get Control Register
183
+ \details Returns the content of the Control Register.
184
+ \return Control Register value
185
+ */
186
+ __STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
187
+ {
188
+ uint32_t result;
189
+
190
+ __ASM volatile ("MRS %0, control" : "=r" (result) );
191
+ return(result);
192
+ }
193
+
194
+
195
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
196
+ /**
197
+ \brief Get Control Register (non-secure)
198
+ \details Returns the content of the non-secure Control Register when in secure mode.
199
+ \return non-secure Control Register value
200
+ */
201
+ __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
202
+ {
203
+ uint32_t result;
204
+
205
+ __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
206
+ return(result);
207
+ }
208
+ #endif
209
+
210
+
211
+ /**
212
+ \brief Set Control Register
213
+ \details Writes the given value to the Control Register.
214
+ \param [in] control Control Register value to set
215
+ */
216
+ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
217
+ {
218
+ __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
219
+ }
220
+
221
+
222
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
223
+ /**
224
+ \brief Set Control Register (non-secure)
225
+ \details Writes the given value to the non-secure Control Register when in secure state.
226
+ \param [in] control Control Register value to set
227
+ */
228
+ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
229
+ {
230
+ __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
231
+ }
232
+ #endif
233
+
234
+
235
+ /**
236
+ \brief Get IPSR Register
237
+ \details Returns the content of the IPSR Register.
238
+ \return IPSR Register value
239
+ */
240
+ __STATIC_FORCEINLINE uint32_t __get_IPSR(void)
241
+ {
242
+ uint32_t result;
243
+
244
+ __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
245
+ return(result);
246
+ }
247
+
248
+
249
+ /**
250
+ \brief Get APSR Register
251
+ \details Returns the content of the APSR Register.
252
+ \return APSR Register value
253
+ */
254
+ __STATIC_FORCEINLINE uint32_t __get_APSR(void)
255
+ {
256
+ uint32_t result;
257
+
258
+ __ASM volatile ("MRS %0, apsr" : "=r" (result) );
259
+ return(result);
260
+ }
261
+
262
+
263
+ /**
264
+ \brief Get xPSR Register
265
+ \details Returns the content of the xPSR Register.
266
+ \return xPSR Register value
267
+ */
268
+ __STATIC_FORCEINLINE uint32_t __get_xPSR(void)
269
+ {
270
+ uint32_t result;
271
+
272
+ __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
273
+ return(result);
274
+ }
275
+
276
+
277
+ /**
278
+ \brief Get Process Stack Pointer
279
+ \details Returns the current value of the Process Stack Pointer (PSP).
280
+ \return PSP Register value
281
+ */
282
+ __STATIC_FORCEINLINE uint32_t __get_PSP(void)
283
+ {
284
+ uint32_t result;
285
+
286
+ __ASM volatile ("MRS %0, psp" : "=r" (result) );
287
+ return(result);
288
+ }
289
+
290
+
291
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
292
+ /**
293
+ \brief Get Process Stack Pointer (non-secure)
294
+ \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
295
+ \return PSP Register value
296
+ */
297
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
298
+ {
299
+ uint32_t result;
300
+
301
+ __ASM volatile ("MRS %0, psp_ns" : "=r" (result) );
302
+ return(result);
303
+ }
304
+ #endif
305
+
306
+
307
+ /**
308
+ \brief Set Process Stack Pointer
309
+ \details Assigns the given value to the Process Stack Pointer (PSP).
310
+ \param [in] topOfProcStack Process Stack Pointer value to set
311
+ */
312
+ __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
313
+ {
314
+ __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
315
+ }
316
+
317
+
318
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
319
+ /**
320
+ \brief Set Process Stack Pointer (non-secure)
321
+ \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
322
+ \param [in] topOfProcStack Process Stack Pointer value to set
323
+ */
324
+ __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
325
+ {
326
+ __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
327
+ }
328
+ #endif
329
+
330
+
331
+ /**
332
+ \brief Get Main Stack Pointer
333
+ \details Returns the current value of the Main Stack Pointer (MSP).
334
+ \return MSP Register value
335
+ */
336
+ __STATIC_FORCEINLINE uint32_t __get_MSP(void)
337
+ {
338
+ uint32_t result;
339
+
340
+ __ASM volatile ("MRS %0, msp" : "=r" (result) );
341
+ return(result);
342
+ }
343
+
344
+
345
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
346
+ /**
347
+ \brief Get Main Stack Pointer (non-secure)
348
+ \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
349
+ \return MSP Register value
350
+ */
351
+ __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
352
+ {
353
+ uint32_t result;
354
+
355
+ __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
356
+ return(result);
357
+ }
358
+ #endif
359
+
360
+
361
+ /**
362
+ \brief Set Main Stack Pointer
363
+ \details Assigns the given value to the Main Stack Pointer (MSP).
364
+ \param [in] topOfMainStack Main Stack Pointer value to set
365
+ */
366
+ __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
367
+ {
368
+ __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
369
+ }
370
+
371
+
372
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
373
+ /**
374
+ \brief Set Main Stack Pointer (non-secure)
375
+ \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
376
+ \param [in] topOfMainStack Main Stack Pointer value to set
377
+ */
378
+ __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
379
+ {
380
+ __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
381
+ }
382
+ #endif
383
+
384
+
385
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
386
+ /**
387
+ \brief Get Stack Pointer (non-secure)
388
+ \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
389
+ \return SP Register value
390
+ */
391
+ __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
392
+ {
393
+ uint32_t result;
394
+
395
+ __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
396
+ return(result);
397
+ }
398
+
399
+
400
+ /**
401
+ \brief Set Stack Pointer (non-secure)
402
+ \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
403
+ \param [in] topOfStack Stack Pointer value to set
404
+ */
405
+ __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
406
+ {
407
+ __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
408
+ }
409
+ #endif
410
+
411
+
412
+ /**
413
+ \brief Get Priority Mask
414
+ \details Returns the current state of the priority mask bit from the Priority Mask Register.
415
+ \return Priority Mask value
416
+ */
417
+ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
418
+ {
419
+ uint32_t result;
420
+
421
+ __ASM volatile ("MRS %0, primask" : "=r" (result) );
422
+ return(result);
423
+ }
424
+
425
+
426
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
427
+ /**
428
+ \brief Get Priority Mask (non-secure)
429
+ \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
430
+ \return Priority Mask value
431
+ */
432
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
433
+ {
434
+ uint32_t result;
435
+
436
+ __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
437
+ return(result);
438
+ }
439
+ #endif
440
+
441
+
442
+ /**
443
+ \brief Set Priority Mask
444
+ \details Assigns the given value to the Priority Mask Register.
445
+ \param [in] priMask Priority Mask
446
+ */
447
+ __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
448
+ {
449
+ __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
450
+ }
451
+
452
+
453
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
454
+ /**
455
+ \brief Set Priority Mask (non-secure)
456
+ \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
457
+ \param [in] priMask Priority Mask
458
+ */
459
+ __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
460
+ {
461
+ __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
462
+ }
463
+ #endif
464
+
465
+
466
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
467
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
468
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
469
+ /**
470
+ \brief Enable FIQ
471
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
472
+ Can only be executed in Privileged modes.
473
+ */
474
+ #define __enable_fault_irq __enable_fiq /* see arm_compat.h */
475
+
476
+
477
+ /**
478
+ \brief Disable FIQ
479
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
480
+ Can only be executed in Privileged modes.
481
+ */
482
+ #define __disable_fault_irq __disable_fiq /* see arm_compat.h */
483
+
484
+
485
+ /**
486
+ \brief Get Base Priority
487
+ \details Returns the current value of the Base Priority register.
488
+ \return Base Priority register value
489
+ */
490
__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  /* Read the BASEPRI special register. */
  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


/* __ARM_FEATURE_CMSE == 3 means the CMSE (TrustZone) extension is available
   and this translation unit is being compiled for the secure state, so the
   _NS aliases of the special registers are accessible. */
#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Base Priority (non-secure)
  \details Returns the current value of the non-secure Base Priority register when in secure state.
  \return               Base Priority register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
{
  /* "memory" clobber: the compiler must not move memory accesses across a
     change of the interrupt-masking level. */
  __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Base Priority (non-secure)
  \details Assigns the given value to the non-secure Base Priority register when in secure state.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
{
  __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
}
#endif


/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
  /* BASEPRI_MAX: the hardware ignores writes that would loosen the mask. */
  __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
}


/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return               Fault Mask register value
 */
__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Fault Mask (non-secure)
  \details Returns the current value of the non-secure Fault Mask register when in secure state.
  \return               Fault Mask register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Fault Mask (non-secure)
  \details Assigns the given value to the non-secure Fault Mask register when in secure state.
  \param [in]    faultMask  Fault Mask value to set
 */
__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
}
#endif
603
+
604
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
605
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
606
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
607
+
608
+
609
#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )

/* Stack pointer limit registers are an ARMv8-M feature; on Baseline devices
   only the secure copies exist, so the non-secure views are RAZ/WI
   (Read-As-Zero / Writes-Ignored), which the fallbacks below emulate. */

/**
  \brief   Get Process Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence zero is returned always in non-secure
  mode.

  \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
  \return               PSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, psplim"  : "=r" (result) );
  return result;
#endif
}

#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Process Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence zero is returned always in non-secure
  mode.

  \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \return               PSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, psplim_ns"  : "=r" (result) );
  return result;
#endif
}
#endif


/**
  \brief   Set Process Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence the write is silently ignored in non-secure
  mode.

  \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  (void)ProcStackPtrLimit;
#else
  __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
#endif
}


#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
/**
  \brief   Set Process Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence the write is silently ignored in non-secure
  mode.

  \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  (void)ProcStackPtrLimit;
#else
  __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
#endif
}
#endif


/**
  \brief   Get Main Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence zero is returned always.

  \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
  \return               MSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, msplim" : "=r" (result) );
  return result;
#endif
}


#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
/**
  \brief   Get Main Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence zero is returned always.

  \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state.
  \return               MSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
  return result;
#endif
}
#endif


/**
  \brief   Set Main Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence the write is silently ignored.

  \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  (void)MainStackPtrLimit;
#else
  __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
#endif
}


#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
/**
  \brief   Set Main Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
  Stack Pointer Limit register hence the write is silently ignored.

  \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  (void)MainStackPtrLimit;
#else
  __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
#endif
}
#endif

#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
788
+
789
/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return               Floating Point Status/Control register value
 */
/* When no FPU is present/used the FPSCR register does not exist: reads
   return 0 and writes are discarded, so code using these macros still
   compiles on FPU-less configurations. */
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#define __get_FPSCR      (uint32_t)__builtin_arm_get_fpscr
#else
#define __get_FPSCR()    ((uint32_t)0U)
#endif

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#define __set_FPSCR      __builtin_arm_set_fpscr
#else
#define __set_FPSCR(x)   ((void)(x))
#endif
812
+
813
+
814
+ /*@} end of CMSIS_Core_RegAccFunctions */
815
+
816
+
817
+ /* ########################## Core Instruction Access ######################### */
818
+ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
819
+ Access to dedicated instructions
820
+ @{
821
+ */
822
+
823
/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
#define __NOP          __builtin_arm_nop

/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
#define __WFI          __builtin_arm_wfi


/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
#define __WFE          __builtin_arm_wfe


/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
#define __SEV          __builtin_arm_sev


/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
/* 0xF selects the SY (full system) barrier option for the barrier builtins. */
#define __ISB()        __builtin_arm_isb(0xF)

/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
#define __DSB()        __builtin_arm_dsb(0xF)


/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
#define __DMB()        __builtin_arm_dmb(0xF)


/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#define __REV(value)   __builtin_bswap32(value)


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
/* Implemented as a full 32-bit byte swap rotated by 16, which swaps the
   bytes within each halfword while keeping the halfwords in place. */
#define __REV16(value) __ROR(__REV(value), 16)


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#define __REVSH(value) (int16_t)__builtin_bswap16(value)
911
+
912
+
913
+ /**
914
+ \brief Rotate Right in unsigned value (32 bit)
915
+ \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
916
+ \param [in] op1 Value to rotate
917
+ \param [in] op2 Number of Bits to rotate
918
+ \return Rotated value
919
+ */
920
+ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
921
+ {
922
+ op2 %= 32U;
923
+ if (op2 == 0U)
924
+ {
925
+ return op1;
926
+ }
927
+ return (op1 >> op2) | (op1 << (32U - op2));
928
+ }
929
+
930
+
931
/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
/* The immediate is pasted into the instruction text, so `value` must be a
   compile-time literal. */
#define __BKPT(value)   __ASM volatile ("bkpt "#value)


/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#define __RBIT          __builtin_arm_rbit
948
+
949
+ /**
950
+ \brief Count leading zeros
951
+ \details Counts the number of leading zeros of a data value.
952
+ \param [in] value Value to count the leading zeros
953
+ \return number of leading zeros in value
954
+ */
955
+ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
956
+ {
957
+ /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
958
+ __builtin_clz(0) is undefined behaviour, so handle this case specially.
959
+ This guarantees ARM-compatible results if happening to compile on a non-ARM
960
+ target, and ensures the compiler doesn't decide to activate any
961
+ optimisations using the logic "value was passed to __builtin_clz, so it
962
+ is non-zero".
963
+ ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
964
+ single CLZ instruction.
965
+ */
966
+ if (value == 0U)
967
+ {
968
+ return 32U;
969
+ }
970
+ return __builtin_clz(value);
971
+ }
972
+
973
+
974
#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )

/* Exclusive-access intrinsics: each maps to the polymorphic Clang builtin
   with a cast that fixes the result width. */

/**
  \brief   LDR Exclusive (8 bit)
  \details Executes a exclusive LDR instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
#define __LDREXB        (uint8_t)__builtin_arm_ldrex


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes a exclusive LDR instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
#define __LDREXH        (uint16_t)__builtin_arm_ldrex


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes a exclusive LDR instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
#define __LDREXW        (uint32_t)__builtin_arm_ldrex


/**
  \brief   STR Exclusive (8 bit)
  \details Executes a exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define __STREXB        (uint32_t)__builtin_arm_strex


/**
  \brief   STR Exclusive (16 bit)
  \details Executes a exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define __STREXH        (uint32_t)__builtin_arm_strex


/**
  \brief   STR Exclusive (32 bit)
  \details Executes a exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define __STREXW        (uint32_t)__builtin_arm_strex


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
#define __CLREX             __builtin_arm_clrex

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
1048
+
1049
+
1050
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1051
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1052
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
1053
+
1054
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT             __builtin_arm_ssat


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT             __builtin_arm_usat


/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/* Unprivileged load/store intrinsics: the "Q" constraint makes the operand a
   plain memory reference without offset, as required by the *T instruction
   forms. */

/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes a Unprivileged LDRT instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
  return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes a Unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
  return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes a Unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
  return(result);
}


/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes a Unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
{
  __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes a Unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
{
  __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes a Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
{
  __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
}
1169
+
1170
+ #else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1171
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1172
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
1173
+
1174
+ /**
1175
+ \brief Signed Saturate
1176
+ \details Saturates a signed value.
1177
+ \param [in] value Value to be saturated
1178
+ \param [in] sat Bit position to saturate to (1..32)
1179
+ \return Saturated value
1180
+ */
1181
+ __STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
1182
+ {
1183
+ if ((sat >= 1U) && (sat <= 32U))
1184
+ {
1185
+ const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
1186
+ const int32_t min = -1 - max ;
1187
+ if (val > max)
1188
+ {
1189
+ return max;
1190
+ }
1191
+ else if (val < min)
1192
+ {
1193
+ return min;
1194
+ }
1195
+ }
1196
+ return val;
1197
+ }
1198
+
1199
+ /**
1200
+ \brief Unsigned Saturate
1201
+ \details Saturates an unsigned value.
1202
+ \param [in] value Value to be saturated
1203
+ \param [in] sat Bit position to saturate to (0..31)
1204
+ \return Saturated value
1205
+ */
1206
+ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
1207
+ {
1208
+ if (sat <= 31U)
1209
+ {
1210
+ const uint32_t max = ((1U << sat) - 1U);
1211
+ if (val > (int32_t)max)
1212
+ {
1213
+ return max;
1214
+ }
1215
+ else if (val < 0)
1216
+ {
1217
+ return 0U;
1218
+ }
1219
+ }
1220
+ return (uint32_t)val;
1221
+ }
1222
+
1223
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1224
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1225
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
1226
+
1227
+
1228
#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )

/* ARMv8-M load-acquire / store-release intrinsics.  The "memory" clobber
   keeps the compiler from reordering memory accesses across these
   ordering instructions. */

/**
  \brief   Load-Acquire (8 bit)
  \details Executes a LDAB instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return ((uint8_t) result);
}


/**
  \brief   Load-Acquire (16 bit)
  \details Executes a LDAH instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return ((uint16_t) result);
}


/**
  \brief   Load-Acquire (32 bit)
  \details Executes a LDA instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
{
  uint32_t result;

  __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return(result);
}


/**
  \brief   Store-Release (8 bit)
  \details Executes a STLB instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
{
  __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (16 bit)
  \details Executes a STLH instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
{
  __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (32 bit)
  \details Executes a STL instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
{
  __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Load-Acquire Exclusive (8 bit)
  \details Executes a LDAB exclusive instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
#define     __LDAEXB                 (uint8_t)__builtin_arm_ldaex


/**
  \brief   Load-Acquire Exclusive (16 bit)
  \details Executes a LDAH exclusive instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
#define     __LDAEXH                 (uint16_t)__builtin_arm_ldaex


/**
  \brief   Load-Acquire Exclusive (32 bit)
  \details Executes a LDA exclusive instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
#define     __LDAEX                  (uint32_t)__builtin_arm_ldaex


/**
  \brief   Store-Release Exclusive (8 bit)
  \details Executes a STLB exclusive instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define     __STLEXB                 (uint32_t)__builtin_arm_stlex


/**
  \brief   Store-Release Exclusive (16 bit)
  \details Executes a STLH exclusive instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define     __STLEXH                 (uint32_t)__builtin_arm_stlex


/**
  \brief   Store-Release Exclusive (32 bit)
  \details Executes a STL exclusive instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define     __STLEX                  (uint32_t)__builtin_arm_stlex

#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
1372
+
1373
+ /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
1374
+
1375
+
1376
+ /* ################### Compiler specific Intrinsics ########################### */
1377
+ /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
1378
+ Access to dedicated SIMD instructions
1379
+ @{
1380
+ */
1381
+
1382
+ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
1383
+
1384
/* SIMD intrinsics (DSP extension): each wraps the corresponding lane-wise
   instruction operating on packed 8-bit (x4) or 16-bit (x2) values.
   Mnemonic prefixes: S = signed, Q = signed saturating, SH = signed halving,
   U = unsigned, UQ = unsigned saturating, UH = unsigned halving.
   See the Arm Architecture Reference Manual for flag side effects
   (e.g. the non-saturating adds/subs update APSR.GE). */

/** \brief SADD8: signed addition of the four byte lanes of op1 and op2. */
__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief QADD8: saturating signed addition of the four byte lanes. */
__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief SHADD8: signed addition of the four byte lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UADD8: unsigned addition of the four byte lanes. */
__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UQADD8: saturating unsigned addition of the four byte lanes. */
__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UHADD8: unsigned addition of the four byte lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


/** \brief SSUB8: signed subtraction of the four byte lanes (op1 - op2). */
__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief QSUB8: saturating signed subtraction of the four byte lanes. */
__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief SHSUB8: signed subtraction of the four byte lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief USUB8: unsigned subtraction of the four byte lanes. */
__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UQSUB8: saturating unsigned subtraction of the four byte lanes. */
__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UHSUB8: unsigned subtraction of the four byte lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


/** \brief SADD16: signed addition of the two halfword lanes. */
__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief QADD16: saturating signed addition of the two halfword lanes. */
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief SHADD16: signed addition of the two halfword lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UADD16: unsigned addition of the two halfword lanes. */
__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UQADD16: saturating unsigned addition of the two halfword lanes. */
__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief UHADD16: unsigned addition of the two halfword lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief SSUB16: signed subtraction of the two halfword lanes (op1 - op2). */
__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief QSUB16: saturating signed subtraction of the two halfword lanes. */
__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief SHSUB16: signed subtraction of the two halfword lanes, each result halved. */
__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1553
+
1554
+ __STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1555
+ {
1556
+ uint32_t result;
1557
+
1558
+ __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1559
+ return(result);
1560
+ }
1561
+
1562
+ __STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1563
+ {
1564
+ uint32_t result;
1565
+
1566
+ __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1567
+ return(result);
1568
+ }
1569
+
1570
+ __STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1571
+ {
1572
+ uint32_t result;
1573
+
1574
+ __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1575
+ return(result);
1576
+ }
1577
+
1578
+ __STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1579
+ {
1580
+ uint32_t result;
1581
+
1582
+ __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1583
+ return(result);
1584
+ }
1585
+
1586
+ __STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1587
+ {
1588
+ uint32_t result;
1589
+
1590
+ __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1591
+ return(result);
1592
+ }
1593
+
1594
+ __STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1595
+ {
1596
+ uint32_t result;
1597
+
1598
+ __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1599
+ return(result);
1600
+ }
1601
+
1602
+ __STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
1603
+ {
1604
+ uint32_t result;
1605
+
1606
+ __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1607
+ return(result);
1608
+ }
1609
+
1610
+ __STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
1611
+ {
1612
+ uint32_t result;
1613
+
1614
+ __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1615
+ return(result);
1616
+ }
1617
+
1618
+ __STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
1619
+ {
1620
+ uint32_t result;
1621
+
1622
+ __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1623
+ return(result);
1624
+ }
1625
+
1626
+ __STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
1627
+ {
1628
+ uint32_t result;
1629
+
1630
+ __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1631
+ return(result);
1632
+ }
1633
+
1634
+ __STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
1635
+ {
1636
+ uint32_t result;
1637
+
1638
+ __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1639
+ return(result);
1640
+ }
1641
+
1642
+ __STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
1643
+ {
1644
+ uint32_t result;
1645
+
1646
+ __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1647
+ return(result);
1648
+ }
1649
+
1650
+ __STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
1651
+ {
1652
+ uint32_t result;
1653
+
1654
+ __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1655
+ return(result);
1656
+ }
1657
+
1658
+ __STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
1659
+ {
1660
+ uint32_t result;
1661
+
1662
+ __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1663
+ return(result);
1664
+ }
1665
+
1666
+ __STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
1667
+ {
1668
+ uint32_t result;
1669
+
1670
+ __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1671
+ return(result);
1672
+ }
1673
+
1674
+ __STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
1675
+ {
1676
+ uint32_t result;
1677
+
1678
+ __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1679
+ return(result);
1680
+ }
1681
+
1682
+ __STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
1683
+ {
1684
+ uint32_t result;
1685
+
1686
+ __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1687
+ return(result);
1688
+ }
1689
+
1690
+ #define __SSAT16(ARG1,ARG2) \
1691
+ ({ \
1692
+ int32_t __RES, __ARG1 = (ARG1); \
1693
+ __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
1694
+ __RES; \
1695
+ })
1696
+
1697
+ #define __USAT16(ARG1,ARG2) \
1698
+ ({ \
1699
+ uint32_t __RES, __ARG1 = (ARG1); \
1700
+ __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
1701
+ __RES; \
1702
+ })
1703
+
1704
+ __STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
1705
+ {
1706
+ uint32_t result;
1707
+
1708
+ __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
1709
+ return(result);
1710
+ }
1711
+
1712
+ __STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
1713
+ {
1714
+ uint32_t result;
1715
+
1716
+ __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1717
+ return(result);
1718
+ }
1719
+
1720
+ __STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
1721
+ {
1722
+ uint32_t result;
1723
+
1724
+ __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
1725
+ return(result);
1726
+ }
1727
+
1728
+ __STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
1729
+ {
1730
+ uint32_t result;
1731
+
1732
+ __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1733
+ return(result);
1734
+ }
1735
+
1736
+ __STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
1737
+ {
1738
+ uint32_t result;
1739
+
1740
+ __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1741
+ return(result);
1742
+ }
1743
+
1744
+ __STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
1745
+ {
1746
+ uint32_t result;
1747
+
1748
+ __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1749
+ return(result);
1750
+ }
1751
+
1752
+ __STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
1753
+ {
1754
+ uint32_t result;
1755
+
1756
+ __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1757
+ return(result);
1758
+ }
1759
+
1760
+ __STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
1761
+ {
1762
+ uint32_t result;
1763
+
1764
+ __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1765
+ return(result);
1766
+ }
1767
+
1768
+ __STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
1769
+ {
1770
+ union llreg_u{
1771
+ uint32_t w32[2];
1772
+ uint64_t w64;
1773
+ } llr;
1774
+ llr.w64 = acc;
1775
+
1776
+ #ifndef __ARMEB__ /* Little endian */
1777
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1778
+ #else /* Big endian */
1779
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1780
+ #endif
1781
+
1782
+ return(llr.w64);
1783
+ }
1784
+
1785
+ __STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
1786
+ {
1787
+ union llreg_u{
1788
+ uint32_t w32[2];
1789
+ uint64_t w64;
1790
+ } llr;
1791
+ llr.w64 = acc;
1792
+
1793
+ #ifndef __ARMEB__ /* Little endian */
1794
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1795
+ #else /* Big endian */
1796
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1797
+ #endif
1798
+
1799
+ return(llr.w64);
1800
+ }
1801
+
1802
+ __STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
1803
+ {
1804
+ uint32_t result;
1805
+
1806
+ __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1807
+ return(result);
1808
+ }
1809
+
1810
+ __STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
1811
+ {
1812
+ uint32_t result;
1813
+
1814
+ __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1815
+ return(result);
1816
+ }
1817
+
1818
+ __STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
1819
+ {
1820
+ uint32_t result;
1821
+
1822
+ __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1823
+ return(result);
1824
+ }
1825
+
1826
+ __STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
1827
+ {
1828
+ uint32_t result;
1829
+
1830
+ __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1831
+ return(result);
1832
+ }
1833
+
1834
+ __STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
1835
+ {
1836
+ union llreg_u{
1837
+ uint32_t w32[2];
1838
+ uint64_t w64;
1839
+ } llr;
1840
+ llr.w64 = acc;
1841
+
1842
+ #ifndef __ARMEB__ /* Little endian */
1843
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1844
+ #else /* Big endian */
1845
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1846
+ #endif
1847
+
1848
+ return(llr.w64);
1849
+ }
1850
+
1851
+ __STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
1852
+ {
1853
+ union llreg_u{
1854
+ uint32_t w32[2];
1855
+ uint64_t w64;
1856
+ } llr;
1857
+ llr.w64 = acc;
1858
+
1859
+ #ifndef __ARMEB__ /* Little endian */
1860
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1861
+ #else /* Big endian */
1862
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1863
+ #endif
1864
+
1865
+ return(llr.w64);
1866
+ }
1867
+
1868
+ __STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
1869
+ {
1870
+ uint32_t result;
1871
+
1872
+ __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1873
+ return(result);
1874
+ }
1875
+
1876
+ __STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
1877
+ {
1878
+ int32_t result;
1879
+
1880
+ __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1881
+ return(result);
1882
+ }
1883
+
1884
+ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
1885
+ {
1886
+ int32_t result;
1887
+
1888
+ __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1889
+ return(result);
1890
+ }
1891
+
1892
+ #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
1893
+ ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
1894
+
1895
+ #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
1896
+ ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
1897
+
1898
+ #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
1899
+
1900
+ #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
1901
+
1902
+ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1903
+ {
1904
+ int32_t result;
1905
+
1906
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
1907
+ return(result);
1908
+ }
1909
+
1910
+ #endif /* (__ARM_FEATURE_DSP == 1) */
1911
+ /*@} end of group CMSIS_SIMD_intrinsics */
1912
+
1913
+
1914
+ #endif /* __CMSIS_ARMCLANG_H */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_compiler.h
3
+ * @brief CMSIS compiler generic header file
4
+ * @version V5.1.0
5
+ * @date 09. October 2018
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
#ifndef __CMSIS_COMPILER_H
#define __CMSIS_COMPILER_H

#include <stdint.h>

/* This header dispatches to a toolchain-specific CMSIS compiler header.
   For toolchains without a dedicated header (TI, TASKING, COSMIC), it supplies
   fallback definitions for the CMSIS compiler-abstraction macros inline. */

/*
 * Arm Compiler 4/5
 */
#if   defined ( __CC_ARM )
  #include "cmsis_armcc.h"


/*
 * Arm Compiler 6.6 LTM (armclang)
 */
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100)
  #include "cmsis_armclang_ltm.h"

/*
 * Arm Compiler above 6.10.1 (armclang)
 */
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100)
  #include "cmsis_armclang.h"


/*
 * GNU Compiler
 */
#elif defined ( __GNUC__ )
  #include "cmsis_gcc.h"


/*
 * IAR Compiler
 */
#elif defined ( __ICCARM__ )
  #include <cmsis_iccarm.h>


/*
 * TI Arm Compiler
 */
#elif defined ( __TI_ARM__ )
  #include <cmsis_ccs.h>

  #ifndef   __ASM
    #define __ASM                                  __asm
  #endif
  #ifndef   __INLINE
    #define __INLINE                               inline
  #endif
  #ifndef   __STATIC_INLINE
    #define __STATIC_INLINE                        static inline
  #endif
  #ifndef   __STATIC_FORCEINLINE
    #define __STATIC_FORCEINLINE                   __STATIC_INLINE
  #endif
  #ifndef   __NO_RETURN
    #define __NO_RETURN                            __attribute__((noreturn))
  #endif
  #ifndef   __USED
    #define __USED                                 __attribute__((used))
  #endif
  #ifndef   __WEAK
    #define __WEAK                                 __attribute__((weak))
  #endif
  #ifndef   __PACKED
    #define __PACKED                               __attribute__((packed))
  #endif
  #ifndef   __PACKED_STRUCT
    #define __PACKED_STRUCT                        struct __attribute__((packed))
  #endif
  #ifndef   __PACKED_UNION
    #define __PACKED_UNION                         union __attribute__((packed))
  #endif
  #ifndef   __UNALIGNED_UINT32        /* deprecated */
    struct __attribute__((packed)) T_UINT32 { uint32_t v; };
    #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
  #endif
  #ifndef   __UNALIGNED_UINT16_WRITE
    __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
    #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val))
  #endif
  #ifndef   __UNALIGNED_UINT16_READ
    __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
    #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
  #endif
  #ifndef   __UNALIGNED_UINT32_WRITE
    __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
    #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
  #endif
  #ifndef   __UNALIGNED_UINT32_READ
    __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
    #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
  #endif
  #ifndef   __ALIGNED
    #define __ALIGNED(x)                           __attribute__((aligned(x)))
  #endif
  #ifndef   __RESTRICT
    #define __RESTRICT                             __restrict
  #endif
  #ifndef   __COMPILER_BARRIER
    #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
    #define __COMPILER_BARRIER()                   (void)0
  #endif


/*
 * TASKING Compiler
 */
#elif defined ( __TASKING__ )
  /*
   * The CMSIS functions have been implemented as intrinsics in the compiler.
   * Please use "carm -?i" to get an up to date list of all intrinsics,
   * Including the CMSIS ones.
   */

  #ifndef   __ASM
    #define __ASM                                  __asm
  #endif
  #ifndef   __INLINE
    #define __INLINE                               inline
  #endif
  #ifndef   __STATIC_INLINE
    #define __STATIC_INLINE                        static inline
  #endif
  #ifndef   __STATIC_FORCEINLINE
    #define __STATIC_FORCEINLINE                   __STATIC_INLINE
  #endif
  #ifndef   __NO_RETURN
    #define __NO_RETURN                            __attribute__((noreturn))
  #endif
  #ifndef   __USED
    #define __USED                                 __attribute__((used))
  #endif
  #ifndef   __WEAK
    #define __WEAK                                 __attribute__((weak))
  #endif
  #ifndef   __PACKED
    #define __PACKED                               __packed__
  #endif
  #ifndef   __PACKED_STRUCT
    #define __PACKED_STRUCT                        struct __packed__
  #endif
  #ifndef   __PACKED_UNION
    #define __PACKED_UNION                         union __packed__
  #endif
  #ifndef   __UNALIGNED_UINT32        /* deprecated */
    struct __packed__ T_UINT32 { uint32_t v; };
    #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
  #endif
  #ifndef   __UNALIGNED_UINT16_WRITE
    __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
    #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
  #endif
  #ifndef   __UNALIGNED_UINT16_READ
    __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
    #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
  #endif
  #ifndef   __UNALIGNED_UINT32_WRITE
    __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
    #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
  #endif
  #ifndef   __UNALIGNED_UINT32_READ
    __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
    #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
  #endif
  #ifndef   __ALIGNED
    #define __ALIGNED(x)                           __align(x)
  #endif
  #ifndef   __RESTRICT
    #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
    #define __RESTRICT
  #endif
  #ifndef   __COMPILER_BARRIER
    #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
    #define __COMPILER_BARRIER()                   (void)0
  #endif


/*
 * COSMIC Compiler
 */
#elif defined ( __CSMC__ )
   #include <cmsis_csm.h>

 #ifndef   __ASM
    #define __ASM                                  _asm
  #endif
  #ifndef   __INLINE
    #define __INLINE                               inline
  #endif
  #ifndef   __STATIC_INLINE
    #define __STATIC_INLINE                        static inline
  #endif
  #ifndef   __STATIC_FORCEINLINE
    #define __STATIC_FORCEINLINE                   __STATIC_INLINE
  #endif
  #ifndef   __NO_RETURN
    // NO RETURN is automatically detected hence no warning here
    #define __NO_RETURN
  #endif
  #ifndef   __USED
    #warning No compiler specific solution for __USED. __USED is ignored.
    #define __USED
  #endif
  #ifndef   __WEAK
    #define __WEAK                                 __weak
  #endif
  #ifndef   __PACKED
    #define __PACKED                               @packed
  #endif
  #ifndef   __PACKED_STRUCT
    #define __PACKED_STRUCT                        @packed struct
  #endif
  #ifndef   __PACKED_UNION
    #define __PACKED_UNION                         @packed union
  #endif
  #ifndef   __UNALIGNED_UINT32        /* deprecated */
    @packed struct T_UINT32 { uint32_t v; };
    #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
  #endif
  #ifndef   __UNALIGNED_UINT16_WRITE
    __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
    #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
  #endif
  #ifndef   __UNALIGNED_UINT16_READ
    __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
    #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
  #endif
  #ifndef   __UNALIGNED_UINT32_WRITE
    __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
    #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
  #endif
  #ifndef   __UNALIGNED_UINT32_READ
    __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
    #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
  #endif
  #ifndef   __ALIGNED
    #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
    #define __ALIGNED(x)
  #endif
  #ifndef   __RESTRICT
    #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
    #define __RESTRICT
  #endif
  #ifndef   __COMPILER_BARRIER
    #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
    #define __COMPILER_BARRIER()                   (void)0
  #endif


#else
  #error Unknown compiler.
#endif


#endif /* __CMSIS_COMPILER_H */
283
+
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_gcc.h ADDED
@@ -0,0 +1,2215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_gcc.h
3
+ * @brief CMSIS compiler GCC header file
4
+ * @version V5.3.2
5
+ * @date 25. January 2021
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2021 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
  #define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines.
   Each macro is only defined if the application has not provided its own. */
#ifndef   __ASM
  #define __ASM                                  __asm
#endif
#ifndef   __INLINE
  #define __INLINE                               inline
#endif
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE                        static inline
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE                   __attribute__((always_inline)) static inline
#endif
#ifndef   __NO_RETURN
  #define __NO_RETURN                            __attribute__((__noreturn__))
#endif
#ifndef   __USED
  #define __USED                                 __attribute__((used))
#endif
#ifndef   __WEAK
  #define __WEAK                                 __attribute__((weak))
#endif
#ifndef   __PACKED
  #define __PACKED                               __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_STRUCT
  #define __PACKED_STRUCT                        struct __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_UNION
  #define __PACKED_UNION                         union __attribute__((packed, aligned(1)))
#endif
/* The packed one-member structs below let GCC emit safe unaligned accesses;
   -Wpacked/-Wattributes are suppressed around each declaration only. */
#ifndef   __UNALIGNED_UINT32        /* deprecated */
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  struct __attribute__((packed)) T_UINT32 { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
#endif
#ifndef   __UNALIGNED_UINT16_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT16_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef   __UNALIGNED_UINT32_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT32_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef   __ALIGNED
  #define __ALIGNED(x)                           __attribute__((aligned(x)))
#endif
#ifndef   __RESTRICT
  #define __RESTRICT                             __restrict
#endif
#ifndef   __COMPILER_BARRIER
  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
#endif
119
+
120
+ /* ######################### Startup and Lowlevel Init ######################## */
121
+
122
+ #ifndef __PROGRAM_START
123
+
124
+ /**
125
+ \brief Initializes data and bss sections
126
+ \details This default implementations initialized all data and additional bss
127
+ sections relying on .copy.table and .zero.table specified properly
128
+ in the used linker script.
129
+
130
+ */
131
+ __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void)
132
+ {
133
+ extern void _start(void) __NO_RETURN;
134
+
135
+ typedef struct {
136
+ uint32_t const* src;
137
+ uint32_t* dest;
138
+ uint32_t wlen;
139
+ } __copy_table_t;
140
+
141
+ typedef struct {
142
+ uint32_t* dest;
143
+ uint32_t wlen;
144
+ } __zero_table_t;
145
+
146
+ extern const __copy_table_t __copy_table_start__;
147
+ extern const __copy_table_t __copy_table_end__;
148
+ extern const __zero_table_t __zero_table_start__;
149
+ extern const __zero_table_t __zero_table_end__;
150
+
151
+ for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) {
152
+ for(uint32_t i=0u; i<pTable->wlen; ++i) {
153
+ pTable->dest[i] = pTable->src[i];
154
+ }
155
+ }
156
+
157
+ for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) {
158
+ for(uint32_t i=0u; i<pTable->wlen; ++i) {
159
+ pTable->dest[i] = 0u;
160
+ }
161
+ }
162
+
163
+ _start();
164
+ }
165
+
166
+ #define __PROGRAM_START __cmsis_start
167
+ #endif
168
+
169
+ #ifndef __INITIAL_SP
170
+ #define __INITIAL_SP __StackTop
171
+ #endif
172
+
173
+ #ifndef __STACK_LIMIT
174
+ #define __STACK_LIMIT __StackLimit
175
+ #endif
176
+
177
+ #ifndef __VECTOR_TABLE
178
+ #define __VECTOR_TABLE __Vectors
179
+ #endif
180
+
181
+ #ifndef __VECTOR_TABLE_ATTRIBUTE
182
+ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors")))
183
+ #endif
184
+
185
+ #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
186
+ #ifndef __STACK_SEAL
187
+ #define __STACK_SEAL __StackSeal
188
+ #endif
189
+
190
+ #ifndef __TZ_STACK_SEAL_SIZE
191
+ #define __TZ_STACK_SEAL_SIZE 8U
192
+ #endif
193
+
194
+ #ifndef __TZ_STACK_SEAL_VALUE
195
+ #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL
196
+ #endif
197
+
198
+
199
+ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) {
200
+ *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE;
201
+ }
202
+ #endif
203
+
204
+
205
+ /* ########################### Core Function Access ########################### */
206
+ /** \ingroup CMSIS_Core_FunctionInterface
207
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
208
+ @{
209
+ */
210
+
211
+ /**
212
+ \brief Enable IRQ Interrupts
213
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
214
+ Can only be executed in Privileged modes.
215
+ */
216
+ // Patched by Edge Impulse, fix for targets that already have __enable_irq
217
+ #ifndef __enable_irq
218
+ __STATIC_FORCEINLINE void __enable_irq(void)
219
+ {
220
+ __ASM volatile ("cpsie i" : : : "memory");
221
+ }
222
+ #endif
223
+
224
+
225
+ /**
226
+ \brief Disable IRQ Interrupts
227
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
228
+ Can only be executed in Privileged modes.
229
+ */
230
+ // Patched by Edge Impulse, fix for targets that already have __disable_irq
231
+ #ifndef __disable_irq
232
+ __STATIC_FORCEINLINE void __disable_irq(void)
233
+ {
234
+ __ASM volatile ("cpsid i" : : : "memory");
235
+ }
236
+ #endif
237
+
238
+
239
+ /**
240
+ \brief Get Control Register
241
+ \details Returns the content of the Control Register.
242
+ \return Control Register value
243
+ */
244
+ __STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
245
+ {
246
+ uint32_t result;
247
+
248
+ __ASM volatile ("MRS %0, control" : "=r" (result) );
249
+ return(result);
250
+ }
251
+
252
+
253
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
254
+ /**
255
+ \brief Get Control Register (non-secure)
256
+ \details Returns the content of the non-secure Control Register when in secure mode.
257
+ \return non-secure Control Register value
258
+ */
259
+ __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
260
+ {
261
+ uint32_t result;
262
+
263
+ __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
264
+ return(result);
265
+ }
266
+ #endif
267
+
268
+
269
+ /**
270
+ \brief Set Control Register
271
+ \details Writes the given value to the Control Register.
272
+ \param [in] control Control Register value to set
273
+ */
274
+ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
275
+ {
276
+ __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
277
+ }
278
+
279
+
280
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
281
+ /**
282
+ \brief Set Control Register (non-secure)
283
+ \details Writes the given value to the non-secure Control Register when in secure state.
284
+ \param [in] control Control Register value to set
285
+ */
286
+ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
287
+ {
288
+ __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
289
+ }
290
+ #endif
291
+
292
+
293
+ /**
294
+ \brief Get IPSR Register
295
+ \details Returns the content of the IPSR Register.
296
+ \return IPSR Register value
297
+ */
298
+ __STATIC_FORCEINLINE uint32_t __get_IPSR(void)
299
+ {
300
+ uint32_t result;
301
+
302
+ __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
303
+ return(result);
304
+ }
305
+
306
+
307
+ /**
308
+ \brief Get APSR Register
309
+ \details Returns the content of the APSR Register.
310
+ \return APSR Register value
311
+ */
312
+ __STATIC_FORCEINLINE uint32_t __get_APSR(void)
313
+ {
314
+ uint32_t result;
315
+
316
+ __ASM volatile ("MRS %0, apsr" : "=r" (result) );
317
+ return(result);
318
+ }
319
+
320
+
321
+ /**
322
+ \brief Get xPSR Register
323
+ \details Returns the content of the xPSR Register.
324
+ \return xPSR Register value
325
+ */
326
+ __STATIC_FORCEINLINE uint32_t __get_xPSR(void)
327
+ {
328
+ uint32_t result;
329
+
330
+ __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
331
+ return(result);
332
+ }
333
+
334
+
335
+ /**
336
+ \brief Get Process Stack Pointer
337
+ \details Returns the current value of the Process Stack Pointer (PSP).
338
+ \return PSP Register value
339
+ */
340
+ __STATIC_FORCEINLINE uint32_t __get_PSP(void)
341
+ {
342
+ uint32_t result;
343
+
344
+ __ASM volatile ("MRS %0, psp" : "=r" (result) );
345
+ return(result);
346
+ }
347
+
348
+
349
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
350
+ /**
351
+ \brief Get Process Stack Pointer (non-secure)
352
+ \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
353
+ \return PSP Register value
354
+ */
355
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
356
+ {
357
+ uint32_t result;
358
+
359
+ __ASM volatile ("MRS %0, psp_ns" : "=r" (result) );
360
+ return(result);
361
+ }
362
+ #endif
363
+
364
+
365
+ /**
366
+ \brief Set Process Stack Pointer
367
+ \details Assigns the given value to the Process Stack Pointer (PSP).
368
+ \param [in] topOfProcStack Process Stack Pointer value to set
369
+ */
370
+ __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
371
+ {
372
+ __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
373
+ }
374
+
375
+
376
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
377
+ /**
378
+ \brief Set Process Stack Pointer (non-secure)
379
+ \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
380
+ \param [in] topOfProcStack Process Stack Pointer value to set
381
+ */
382
+ __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
383
+ {
384
+ __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
385
+ }
386
+ #endif
387
+
388
+
389
+ /**
390
+ \brief Get Main Stack Pointer
391
+ \details Returns the current value of the Main Stack Pointer (MSP).
392
+ \return MSP Register value
393
+ */
394
+ __STATIC_FORCEINLINE uint32_t __get_MSP(void)
395
+ {
396
+ uint32_t result;
397
+
398
+ __ASM volatile ("MRS %0, msp" : "=r" (result) );
399
+ return(result);
400
+ }
401
+
402
+
403
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
404
+ /**
405
+ \brief Get Main Stack Pointer (non-secure)
406
+ \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
407
+ \return MSP Register value
408
+ */
409
+ __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
410
+ {
411
+ uint32_t result;
412
+
413
+ __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
414
+ return(result);
415
+ }
416
+ #endif
417
+
418
+
419
+ /**
420
+ \brief Set Main Stack Pointer
421
+ \details Assigns the given value to the Main Stack Pointer (MSP).
422
+ \param [in] topOfMainStack Main Stack Pointer value to set
423
+ */
424
+ __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
425
+ {
426
+ __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
427
+ }
428
+
429
+
430
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
431
+ /**
432
+ \brief Set Main Stack Pointer (non-secure)
433
+ \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
434
+ \param [in] topOfMainStack Main Stack Pointer value to set
435
+ */
436
+ __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
437
+ {
438
+ __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
439
+ }
440
+ #endif
441
+
442
+
443
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
444
+ /**
445
+ \brief Get Stack Pointer (non-secure)
446
+ \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
447
+ \return SP Register value
448
+ */
449
+ __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
450
+ {
451
+ uint32_t result;
452
+
453
+ __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
454
+ return(result);
455
+ }
456
+
457
+
458
+ /**
459
+ \brief Set Stack Pointer (non-secure)
460
+ \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
461
+ \param [in] topOfStack Stack Pointer value to set
462
+ */
463
+ __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
464
+ {
465
+ __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
466
+ }
467
+ #endif
468
+
469
+
470
+ /**
471
+ \brief Get Priority Mask
472
+ \details Returns the current state of the priority mask bit from the Priority Mask Register.
473
+ \return Priority Mask value
474
+ */
475
+ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
476
+ {
477
+ uint32_t result;
478
+
479
+ __ASM volatile ("MRS %0, primask" : "=r" (result) );
480
+ return(result);
481
+ }
482
+
483
+
484
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
485
+ /**
486
+ \brief Get Priority Mask (non-secure)
487
+ \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
488
+ \return Priority Mask value
489
+ */
490
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
491
+ {
492
+ uint32_t result;
493
+
494
+ __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
495
+ return(result);
496
+ }
497
+ #endif
498
+
499
+
500
+ /**
501
+ \brief Set Priority Mask
502
+ \details Assigns the given value to the Priority Mask Register.
503
+ \param [in] priMask Priority Mask
504
+ */
505
+ __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
506
+ {
507
+ __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
508
+ }
509
+
510
+
511
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
512
+ /**
513
+ \brief Set Priority Mask (non-secure)
514
+ \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
515
+ \param [in] priMask Priority Mask
516
+ */
517
+ __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
518
+ {
519
+ __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
520
+ }
521
+ #endif
522
+
523
+
524
+ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
525
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
526
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
527
+ /**
528
+ \brief Enable FIQ
529
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
530
+ Can only be executed in Privileged modes.
531
+ */
532
+ __STATIC_FORCEINLINE void __enable_fault_irq(void)
533
+ {
534
+ __ASM volatile ("cpsie f" : : : "memory");
535
+ }
536
+
537
+
538
+ /**
539
+ \brief Disable FIQ
540
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
541
+ Can only be executed in Privileged modes.
542
+ */
543
+ __STATIC_FORCEINLINE void __disable_fault_irq(void)
544
+ {
545
+ __ASM volatile ("cpsid f" : : : "memory");
546
+ }
547
+
548
+
549
+ /**
550
+ \brief Get Base Priority
551
+ \details Returns the current value of the Base Priority register.
552
+ \return Base Priority register value
553
+ */
554
+ __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
555
+ {
556
+ uint32_t result;
557
+
558
+ __ASM volatile ("MRS %0, basepri" : "=r" (result) );
559
+ return(result);
560
+ }
561
+
562
+
563
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
564
+ /**
565
+ \brief Get Base Priority (non-secure)
566
+ \details Returns the current value of the non-secure Base Priority register when in secure state.
567
+ \return Base Priority register value
568
+ */
569
+ __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
570
+ {
571
+ uint32_t result;
572
+
573
+ __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
574
+ return(result);
575
+ }
576
+ #endif
577
+
578
+
579
+ /**
580
+ \brief Set Base Priority
581
+ \details Assigns the given value to the Base Priority register.
582
+ \param [in] basePri Base Priority value to set
583
+ */
584
+ __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
585
+ {
586
+ __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
587
+ }
588
+
589
+
590
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
591
+ /**
592
+ \brief Set Base Priority (non-secure)
593
+ \details Assigns the given value to the non-secure Base Priority register when in secure state.
594
+ \param [in] basePri Base Priority value to set
595
+ */
596
+ __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
597
+ {
598
+ __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
599
+ }
600
+ #endif
601
+
602
+
603
+ /**
604
+ \brief Set Base Priority with condition
605
+ \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
606
+ or the new value increases the BASEPRI priority level.
607
+ \param [in] basePri Base Priority value to set
608
+ */
609
+ __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
610
+ {
611
+ __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
612
+ }
613
+
614
+
615
+ /**
616
+ \brief Get Fault Mask
617
+ \details Returns the current value of the Fault Mask register.
618
+ \return Fault Mask register value
619
+ */
620
+ __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
621
+ {
622
+ uint32_t result;
623
+
624
+ __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
625
+ return(result);
626
+ }
627
+
628
+
629
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
630
+ /**
631
+ \brief Get Fault Mask (non-secure)
632
+ \details Returns the current value of the non-secure Fault Mask register when in secure state.
633
+ \return Fault Mask register value
634
+ */
635
+ __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
636
+ {
637
+ uint32_t result;
638
+
639
+ __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
640
+ return(result);
641
+ }
642
+ #endif
643
+
644
+
645
+ /**
646
+ \brief Set Fault Mask
647
+ \details Assigns the given value to the Fault Mask register.
648
+ \param [in] faultMask Fault Mask value to set
649
+ */
650
+ __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
651
+ {
652
+ __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
653
+ }
654
+
655
+
656
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
657
+ /**
658
+ \brief Set Fault Mask (non-secure)
659
+ \details Assigns the given value to the non-secure Fault Mask register when in secure state.
660
+ \param [in] faultMask Fault Mask value to set
661
+ */
662
+ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
663
+ {
664
+ __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
665
+ }
666
+ #endif
667
+
668
+ #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
669
+ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
670
+ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
671
+
672
+
673
+ #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
674
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
675
+
676
+ /**
677
+ \brief Get Process Stack Pointer Limit
678
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
679
+ Stack Pointer Limit register hence zero is returned always in non-secure
680
+ mode.
681
+
682
+ \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
683
+ \return PSPLIM Register value
684
+ */
685
+ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
686
+ {
687
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
688
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
689
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
690
+ return 0U;
691
+ #else
692
+ uint32_t result;
693
+ __ASM volatile ("MRS %0, psplim" : "=r" (result) );
694
+ return result;
695
+ #endif
696
+ }
697
+
698
+ #if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
699
+ /**
700
+ \brief Get Process Stack Pointer Limit (non-secure)
701
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
702
+ Stack Pointer Limit register hence zero is returned always.
703
+
704
+ \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
705
+ \return PSPLIM Register value
706
+ */
707
+ __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
708
+ {
709
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
710
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
711
+ return 0U;
712
+ #else
713
+ uint32_t result;
714
+ __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );
715
+ return result;
716
+ #endif
717
+ }
718
+ #endif
719
+
720
+
721
+ /**
722
+ \brief Set Process Stack Pointer Limit
723
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
724
+ Stack Pointer Limit register hence the write is silently ignored in non-secure
725
+ mode.
726
+
727
+ \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
728
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
729
+ */
730
+ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
731
+ {
732
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
733
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
734
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
735
+ (void)ProcStackPtrLimit;
736
+ #else
737
+ __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
738
+ #endif
739
+ }
740
+
741
+
742
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
743
+ /**
744
+ \brief Set Process Stack Pointer (non-secure)
745
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
746
+ Stack Pointer Limit register hence the write is silently ignored.
747
+
748
+ \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
749
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
750
+ */
751
+ __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
752
+ {
753
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
754
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
755
+ (void)ProcStackPtrLimit;
756
+ #else
757
+ __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
758
+ #endif
759
+ }
760
+ #endif
761
+
762
+
763
+ /**
764
+ \brief Get Main Stack Pointer Limit
765
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
766
+ Stack Pointer Limit register hence zero is returned always in non-secure
767
+ mode.
768
+
769
+ \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
770
+ \return MSPLIM Register value
771
+ */
772
+ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
773
+ {
774
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
775
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
776
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
777
+ return 0U;
778
+ #else
779
+ uint32_t result;
780
+ __ASM volatile ("MRS %0, msplim" : "=r" (result) );
781
+ return result;
782
+ #endif
783
+ }
784
+
785
+
786
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
787
+ /**
788
+ \brief Get Main Stack Pointer Limit (non-secure)
789
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
790
+ Stack Pointer Limit register hence zero is returned always.
791
+
792
+ \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state.
793
+ \return MSPLIM Register value
794
+ */
795
+ __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
796
+ {
797
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
798
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
799
+ return 0U;
800
+ #else
801
+ uint32_t result;
802
+ __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
803
+ return result;
804
+ #endif
805
+ }
806
+ #endif
807
+
808
+
809
+ /**
810
+ \brief Set Main Stack Pointer Limit
811
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
812
+ Stack Pointer Limit register hence the write is silently ignored in non-secure
813
+ mode.
814
+
815
+ \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
816
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set
817
+ */
818
+ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
819
+ {
820
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
821
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
822
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
823
+ (void)MainStackPtrLimit;
824
+ #else
825
+ __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
826
+ #endif
827
+ }
828
+
829
+
830
+ #if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
831
+ /**
832
+ \brief Set Main Stack Pointer Limit (non-secure)
833
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
834
+ Stack Pointer Limit register hence the write is silently ignored.
835
+
836
+ \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
837
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set
838
+ */
839
+ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
840
+ {
841
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
842
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
843
+ (void)MainStackPtrLimit;
844
+ #else
845
+ __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
846
+ #endif
847
+ }
848
+ #endif
849
+
850
+ #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
851
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */
852
+
853
+
854
+ /**
855
+ \brief Get FPSCR
856
+ \details Returns the current value of the Floating Point Status/Control register.
857
+ \return Floating Point Status/Control register value
858
+ */
859
+ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
860
+ {
861
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
862
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
863
+ #if __has_builtin(__builtin_arm_get_fpscr)
864
+ // Re-enable using built-in when GCC has been fixed
865
+ // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
866
+ /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
867
+ return __builtin_arm_get_fpscr();
868
+ #else
869
+ uint32_t result;
870
+
871
+ __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
872
+ return(result);
873
+ #endif
874
+ #else
875
+ return(0U);
876
+ #endif
877
+ }
878
+
879
+
880
+ /**
881
+ \brief Set FPSCR
882
+ \details Assigns the given value to the Floating Point Status/Control register.
883
+ \param [in] fpscr Floating Point Status/Control value to set
884
+ */
885
+ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
886
+ {
887
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
888
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
889
+ #if __has_builtin(__builtin_arm_set_fpscr)
890
+ // Re-enable using built-in when GCC has been fixed
891
+ // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
892
+ /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
893
+ __builtin_arm_set_fpscr(fpscr);
894
+ #else
895
+ __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
896
+ #endif
897
+ #else
898
+ (void)fpscr;
899
+ #endif
900
+ }
901
+
902
+
903
+ /*@} end of CMSIS_Core_RegAccFunctions */
904
+
905
+
906
+ /* ########################## Core Instruction Access ######################### */
907
+ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
908
+ Access to dedicated instructions
909
+ @{
910
+ */
911
+
912
+ /* Define macros for porting to both thumb1 and thumb2.
913
+ * For thumb1, use low register (r0-r7), specified by constraint "l"
914
+ * Otherwise, use general registers, specified by constraint "r" */
915
+ #if defined (__thumb__) && !defined (__thumb2__)
916
+ #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
917
+ #define __CMSIS_GCC_RW_REG(r) "+l" (r)
918
+ #define __CMSIS_GCC_USE_REG(r) "l" (r)
919
+ #else
920
+ #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
921
+ #define __CMSIS_GCC_RW_REG(r) "+r" (r)
922
+ #define __CMSIS_GCC_USE_REG(r) "r" (r)
923
+ #endif
924
+
925
+ /**
926
+ \brief No Operation
927
+ \details No Operation does nothing. This instruction can be used for code alignment purposes.
928
+ */
929
+ #define __NOP() __ASM volatile ("nop")
930
+
931
+ /**
932
+ \brief Wait For Interrupt
933
+ \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
934
+ */
935
+ #define __WFI() __ASM volatile ("wfi":::"memory")
936
+
937
+
938
+ /**
939
+ \brief Wait For Event
940
+ \details Wait For Event is a hint instruction that permits the processor to enter
941
+ a low-power state until one of a number of events occurs.
942
+ */
943
+ #define __WFE() __ASM volatile ("wfe":::"memory")
944
+
945
+
946
+ /**
947
+ \brief Send Event
948
+ \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
949
+ */
950
+ #define __SEV() __ASM volatile ("sev")
951
+
952
+
953
+ /**
954
+ \brief Instruction Synchronization Barrier
955
+ \details Instruction Synchronization Barrier flushes the pipeline in the processor,
956
+ so that all instructions following the ISB are fetched from cache or memory,
957
+ after the instruction has been completed.
958
+ */
959
+ __STATIC_FORCEINLINE void __ISB(void)
960
+ {
961
+ __ASM volatile ("isb 0xF":::"memory");
962
+ }
963
+
964
+
965
+ /**
966
+ \brief Data Synchronization Barrier
967
+ \details Acts as a special kind of Data Memory Barrier.
968
+ It completes when all explicit memory accesses before this instruction complete.
969
+ */
970
+ __STATIC_FORCEINLINE void __DSB(void)
971
+ {
972
+ __ASM volatile ("dsb 0xF":::"memory");
973
+ }
974
+
975
+
976
+ /**
977
+ \brief Data Memory Barrier
978
+ \details Ensures the apparent order of the explicit memory operations before
979
+ and after the instruction, without ensuring their completion.
980
+ */
981
+ __STATIC_FORCEINLINE void __DMB(void)
982
+ {
983
+ __ASM volatile ("dmb 0xF":::"memory");
984
+ }
985
+
986
+
987
+ /**
988
+ \brief Reverse byte order (32 bit)
989
+ \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
990
+ \param [in] value Value to reverse
991
+ \return Reversed value
992
+ */
993
+ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
994
+ {
995
+ #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
996
+ return __builtin_bswap32(value);
997
+ #else
998
+ uint32_t result;
999
+
1000
+ __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1001
+ return result;
1002
+ #endif
1003
+ }
1004
+
1005
+
1006
+ /**
1007
+ \brief Reverse byte order (16 bit)
1008
+ \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
1009
+ \param [in] value Value to reverse
1010
+ \return Reversed value
1011
+ */
1012
+ __STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
1013
+ {
1014
+ uint32_t result;
1015
+
1016
+ __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1017
+ return result;
1018
+ }
1019
+
1020
+
1021
+ /**
1022
+ \brief Reverse byte order (16 bit)
1023
+ \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
1024
+ \param [in] value Value to reverse
1025
+ \return Reversed value
1026
+ */
1027
+ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
1028
+ {
1029
+ #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
1030
+ return (int16_t)__builtin_bswap16(value);
1031
+ #else
1032
+ int16_t result;
1033
+
1034
+ __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1035
+ return result;
1036
+ #endif
1037
+ }
1038
+
1039
+
1040
+ /**
1041
+ \brief Rotate Right in unsigned value (32 bit)
1042
+ \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
1043
+ \param [in] op1 Value to rotate
1044
+ \param [in] op2 Number of Bits to rotate
1045
+ \return Rotated value
1046
+ */
1047
+ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
1048
+ {
1049
+ op2 %= 32U;
1050
+ if (op2 == 0U)
1051
+ {
1052
+ return op1;
1053
+ }
1054
+ return (op1 >> op2) | (op1 << (32U - op2));
1055
+ }
1056
+
1057
+
1058
+ /**
1059
+ \brief Breakpoint
1060
+ \details Causes the processor to enter Debug state.
1061
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
1062
+ \param [in] value is ignored by the processor.
1063
+ If required, a debugger can use it to store additional information about the breakpoint.
1064
+ */
1065
+ #define __BKPT(value) __ASM volatile ("bkpt "#value)
1066
+
1067
+
1068
/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
  /* Cores with the RBIT instruction (ARMv7-M and up): single instruction. */
   __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  /* Software fallback for cores without RBIT (e.g. ARMv6-M). */
  uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value != 0U; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}
1096
+
1097
+
1098
+ /**
1099
+ \brief Count leading zeros
1100
+ \details Counts the number of leading zeros of a data value.
1101
+ \param [in] value Value to count the leading zeros
1102
+ \return number of leading zeros in value
1103
+ */
1104
+ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
1105
+ {
1106
+ /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
1107
+ __builtin_clz(0) is undefined behaviour, so handle this case specially.
1108
+ This guarantees ARM-compatible results if happening to compile on a non-ARM
1109
+ target, and ensures the compiler doesn't decide to activate any
1110
+ optimisations using the logic "value was passed to __builtin_clz, so it
1111
+ is non-zero".
1112
+ ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
1113
+ single CLZ instruction.
1114
+ */
1115
+ if (value == 0U)
1116
+ {
1117
+ return 32U;
1118
+ }
1119
+ return __builtin_clz(value);
1120
+ }
1121
+
1122
+
1123
/* Exclusive (LDREX/STREX) access intrinsics.
   Only available on cores that implement the exclusive monitor
   (ARMv7-M, ARMv7E-M, ARMv8-M Mainline and Baseline). */
#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
    uint32_t result;

   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
   uint32_t result;

   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
1250
+
1251
+
1252
/* Saturation and unprivileged load/store intrinsics.
   Mainline profiles (ARMv7-M, ARMv7E-M, ARMv8-M Mainline) have the SSAT/USAT,
   RRX, LDR?T and STR?T instructions; other profiles fall back to portable C
   implementations of the saturation helpers below. */
#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  ARG1  Value to be saturated
  \param [in]  ARG2  Bit position to saturate to (1..32); must be a constant
                     expression (encoded as an instruction immediate)
  \return            Saturated value
 */
#define __SSAT(ARG1, ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  ARG1  Value to be saturated
  \param [in]  ARG2  Bit position to saturate to (0..31); must be a constant
                     expression (encoded as an instruction immediate)
  \return            Saturated value
 */
#define __USAT(ARG1, ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })


/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes an Unprivileged LDRT instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes an Unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes an Unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
   return(result);
}


/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes an Unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
{
   __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes an Unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
{
   __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes an Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
{
   __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
}

#else  /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
           Portable C fallback for cores without the SSAT instruction.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
  if ((sat >= 1U) && (sat <= 32U))
  {
    /* Range of a signed 'sat'-bit value: [-2^(sat-1), 2^(sat-1) - 1]. */
    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
    const int32_t min = -1 - max ;
    if (val > max)
    {
      return max;
    }
    else if (val < min)
    {
      return min;
    }
  }
  /* Out-of-range 'sat' leaves the value unchanged. */
  return val;
}

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
           Portable C fallback for cores without the USAT instruction.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
  if (sat <= 31U)
  {
    /* Range of an unsigned 'sat'-bit value: [0, 2^sat - 1]. */
    const uint32_t max = ((1U << sat) - 1U);
    if (val > (int32_t)max)
    {
      return max;
    }
    else if (val < 0)
    {
      return 0U;
    }
  }
  /* Out-of-range 'sat' leaves the value unchanged (converted to unsigned). */
  return (uint32_t)val;
}

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
1453
+
1454
+
1455
/* Load-Acquire / Store-Release intrinsics (ARMv8-M only).
   These instructions provide one-way memory-ordering barriers in addition to
   the access itself; the "memory" clobber keeps the compiler from reordering
   surrounding accesses across them. */
#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
/**
  \brief   Load-Acquire (8 bit)
  \details Executes a LDAB instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint8_t) result);
}


/**
  \brief   Load-Acquire (16 bit)
  \details Executes a LDAH instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint16_t) result);
}


/**
  \brief   Load-Acquire (32 bit)
  \details Executes a LDA instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return(result);
}


/**
  \brief   Store-Release (8 bit)
  \details Executes a STLB instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
{
   __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (16 bit)
  \details Executes a STLH instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
{
   __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (32 bit)
  \details Executes a STL instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
{
   __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Load-Acquire Exclusive (8 bit)
  \details Executes a LDAB exclusive instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint8_t) result);
}


/**
  \brief   Load-Acquire Exclusive (16 bit)
  \details Executes a LDAH exclusive instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint16_t) result);
}


/**
  \brief   Load-Acquire Exclusive (32 bit)
  \details Executes a LDA exclusive instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return(result);
}


/**
  \brief   Store-Release Exclusive (8 bit)
  \details Executes a STLB exclusive instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
{
   uint32_t result;

   __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
   return(result);
}


/**
  \brief   Store-Release Exclusive (16 bit)
  \details Executes a STLH exclusive instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
{
   uint32_t result;

   __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
   return(result);
}


/**
  \brief   Store-Release Exclusive (32 bit)
  \details Executes a STL exclusive instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
{
   uint32_t result;

   __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
   return(result);
}

#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
1635
+
1636
+ /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
1637
+
1638
+
1639
+ /* ################### Compiler specific Intrinsics ########################### */
1640
+ /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
1641
+ Access to dedicated SIMD instructions
1642
+ @{
1643
+ */
1644
+
1645
+ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
1646
+
1647
/* Quad 8-bit SIMD additions and subtractions. Each intrinsic operates on the
   four byte lanes of its 32-bit operands independently. */

/** \brief Quad 8-bit signed addition (SADD8). */
__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit saturating signed addition (QADD8). */
__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit signed addition with halved results (SHADD8). */
__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit unsigned addition (UADD8). */
__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit saturating unsigned addition (UQADD8). */
__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit unsigned addition with halved results (UHADD8). */
__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


/** \brief Quad 8-bit signed subtraction (SSUB8). */
__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit saturating signed subtraction (QSUB8). */
__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit signed subtraction with halved results (SHSUB8). */
__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit unsigned subtraction (USUB8). */
__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit saturating unsigned subtraction (UQSUB8). */
__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Quad 8-bit unsigned subtraction with halved results (UHSUB8). */
__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1743
+
1744
+
1745
/* Dual 16-bit SIMD additions and subtractions. Each intrinsic operates on the
   two halfword lanes of its 32-bit operands independently. */

/** \brief Dual 16-bit signed addition (SADD16). */
__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating signed addition (QADD16). */
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed addition with halved results (SHADD16). */
__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned addition (UADD16). */
__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating unsigned addition (UQADD16). */
__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned addition with halved results (UHADD16). */
__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed subtraction (SSUB16). */
__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating signed subtraction (QSUB16). */
__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed subtraction with halved results (SHSUB16). */
__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned subtraction (USUB16). */
__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating unsigned subtraction (UQSUB16). */
__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned subtraction with halved results (UHSUB16). */
__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1840
+
1841
/* Dual 16-bit add/subtract with lane exchange (ASX: add high, subtract low;
   SAX: subtract high, add low), plus sum-of-absolute-differences helpers. */

/** \brief Dual 16-bit signed add/subtract with exchange (SASX). */
__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating signed add/subtract with exchange (QASX). */
__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed add/subtract with exchange, halved results (SHASX). */
__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned add/subtract with exchange (UASX). */
__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating unsigned add/subtract with exchange (UQASX). */
__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned add/subtract with exchange, halved results (UHASX). */
__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed subtract/add with exchange (SSAX). */
__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating signed subtract/add with exchange (QSAX). */
__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed subtract/add with exchange, halved results (SHSAX). */
__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned subtract/add with exchange (USAX). */
__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit saturating unsigned subtract/add with exchange (UQSAX). */
__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit unsigned subtract/add with exchange, halved results (UHSAX). */
__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Unsigned sum of quad 8-bit absolute differences (USAD8). */
__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Unsigned sum of quad 8-bit absolute differences with accumulate (USADA8). */
__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
1952
+
1953
/** \brief Dual 16-bit signed saturate (SSAT16). ARG2 must be a constant
           expression: it is encoded as the instruction immediate. */
#define __SSAT16(ARG1, ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

/** \brief Dual 16-bit unsigned saturate (USAT16). ARG2 must be a constant
           expression: it is encoded as the instruction immediate. */
#define __USAT16(ARG1, ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

/** \brief Dual zero-extend of bytes 0 and 2 to halfwords (UXTB16). */
__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

/** \brief Dual zero-extend and add (UXTAB16). */
__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual sign-extend of bytes 0 and 2 to halfwords (SXTB16). */
__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

/** \brief SXTB16 applied to the operand rotated right by 'rotate' bits.
           Uses the instruction's ROR #8/#16/#24 form when 'rotate' is a
           matching compile-time constant; otherwise rotates in software
           first. */
__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate)
{
  uint32_t result;
  if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) {
    __ASM volatile ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) );
  } else {
    result = __SXTB16(__ROR(op1, rotate)) ;
  }
  return result;
}

/** \brief Dual sign-extend and add (SXTAB16). */
__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief SXTAB16 with the second operand rotated right by 'rotate' bits.
           Uses the instruction's ROR #8/#16/#24 form when 'rotate' is a
           matching compile-time constant; otherwise rotates in software
           first. */
__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate)
{
  uint32_t result;
  if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) {
    __ASM volatile ("sxtab16 %0, %1, %2, ROR %3" : "=r" (result) : "r" (op1) , "r" (op2) , "i" (rotate));
  } else {
    result = __SXTAB16(op1, __ROR(op2, rotate));
  }
  return result;
}
2022
+
2023
+
2024
/* Dual 16-bit signed multiply intrinsics: each multiplies the two halfword
   lane pairs of op1 and op2, then adds (SMUAD*) or subtracts (SMUSD*) the two
   products; the 'X' variants exchange the halfwords of op2 first. */

/** \brief Dual 16-bit signed multiply, add products (SMUAD). */
__STATIC_FORCEINLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed multiply with exchange, add products (SMUADX). */
__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed multiply, add products, accumulate with op3 (SMLAD). */
__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/** \brief Dual 16-bit signed multiply with exchange, add products, accumulate with op3 (SMLADX). */
__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/** \brief Dual 16-bit signed multiply, add products, 64-bit accumulate (SMLALD).
           The union splits the 64-bit accumulator into the two 32-bit
           registers the instruction operates on, honouring endianness. */
__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/** \brief Dual 16-bit signed multiply with exchange, add products, 64-bit accumulate (SMLALDX). */
__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/** \brief Dual 16-bit signed multiply, subtract products (SMUSD). */
__STATIC_FORCEINLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed multiply with exchange, subtract products (SMUSDX). */
__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/** \brief Dual 16-bit signed multiply, subtract products, accumulate with op3 (SMLSD). */
__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/** \brief Dual 16-bit signed multiply with exchange, subtract products, accumulate with op3 (SMLSDX). */
__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
2121
+
2122
+ __STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
2123
+ {
2124
+ union llreg_u{
2125
+ uint32_t w32[2];
2126
+ uint64_t w64;
2127
+ } llr;
2128
+ llr.w64 = acc;
2129
+
2130
+ #ifndef __ARMEB__ /* Little endian */
2131
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
2132
+ #else /* Big endian */
2133
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
2134
+ #endif
2135
+
2136
+ return(llr.w64);
2137
+ }
2138
+
2139
+ __STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
2140
+ {
2141
+ union llreg_u{
2142
+ uint32_t w32[2];
2143
+ uint64_t w64;
2144
+ } llr;
2145
+ llr.w64 = acc;
2146
+
2147
+ #ifndef __ARMEB__ /* Little endian */
2148
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
2149
+ #else /* Big endian */
2150
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
2151
+ #endif
2152
+
2153
+ return(llr.w64);
2154
+ }
2155
+
2156
+ __STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
2157
+ {
2158
+ uint32_t result;
2159
+
2160
+ __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2161
+ return(result);
2162
+ }
2163
+
2164
+ __STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
2165
+ {
2166
+ int32_t result;
2167
+
2168
+ __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2169
+ return(result);
2170
+ }
2171
+
2172
+ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
2173
+ {
2174
+ int32_t result;
2175
+
2176
+ __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2177
+ return(result);
2178
+ }
2179
+
2180
+
2181
+ #define __PKHBT(ARG1,ARG2,ARG3) \
2182
+ __extension__ \
2183
+ ({ \
2184
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
2185
+ __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
2186
+ __RES; \
2187
+ })
2188
+
2189
+ #define __PKHTB(ARG1,ARG2,ARG3) \
2190
+ __extension__ \
2191
+ ({ \
2192
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
2193
+ if (ARG3 == 0) \
2194
+ __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
2195
+ else \
2196
+ __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
2197
+ __RES; \
2198
+ })
2199
+
2200
+
2201
+ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
2202
+ {
2203
+ int32_t result;
2204
+
2205
+ __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
2206
+ return(result);
2207
+ }
2208
+
2209
+ #endif /* (__ARM_FEATURE_DSP == 1) */
2210
+ /*@} end of group CMSIS_SIMD_intrinsics */
2211
+
2212
+
2213
+ #pragma GCC diagnostic pop
2214
+
2215
+ #endif /* __CMSIS_GCC_H */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_iccarm.h ADDED
@@ -0,0 +1,971 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_iccarm.h
3
+ * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file
4
+ * @version V5.2.0
5
+ * @date 28. January 2020
6
+ ******************************************************************************/
7
+
8
+ //------------------------------------------------------------------------------
9
+ //
10
+ // Copyright (c) 2017-2020 IAR Systems
11
+ // Copyright (c) 2017-2019 Arm Limited. All rights reserved.
12
+ //
13
+ // SPDX-License-Identifier: Apache-2.0
14
+ //
15
+ // Licensed under the Apache License, Version 2.0 (the "License")
16
+ // you may not use this file except in compliance with the License.
17
+ // You may obtain a copy of the License at
18
+ // http://www.apache.org/licenses/LICENSE-2.0
19
+ //
20
+ // Unless required by applicable law or agreed to in writing, software
21
+ // distributed under the License is distributed on an "AS IS" BASIS,
22
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
23
+ // See the License for the specific language governing permissions and
24
+ // limitations under the License.
25
+ //
26
+ //------------------------------------------------------------------------------
27
+
28
+
29
+ #ifndef __CMSIS_ICCARM_H__
30
+ #define __CMSIS_ICCARM_H__
31
+
32
+ #ifndef __ICCARM__
33
+ #error This file should only be compiled by ICCARM
34
+ #endif
35
+
36
+ #pragma system_include
37
+
38
+ #define __IAR_FT _Pragma("inline=forced") __intrinsic
39
+
40
+ #if (__VER__ >= 8000000)
41
+ #define __ICCARM_V8 1
42
+ #else
43
+ #define __ICCARM_V8 0
44
+ #endif
45
+
46
+ #ifndef __ALIGNED
47
+ #if __ICCARM_V8
48
+ #define __ALIGNED(x) __attribute__((aligned(x)))
49
+ #elif (__VER__ >= 7080000)
50
+ /* Needs IAR language extensions */
51
+ #define __ALIGNED(x) __attribute__((aligned(x)))
52
+ #else
53
+ #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored.
54
+ #define __ALIGNED(x)
55
+ #endif
56
+ #endif
57
+
58
+
59
+ /* Define compiler macros for CPU architecture, used in CMSIS 5.
60
+ */
61
+ #if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__
62
+ /* Macros already defined */
63
+ #else
64
+ #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__)
65
+ #define __ARM_ARCH_8M_MAIN__ 1
66
+ #elif defined(__ARM8M_BASELINE__)
67
+ #define __ARM_ARCH_8M_BASE__ 1
68
+ #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M'
69
+ #if __ARM_ARCH == 6
70
+ #define __ARM_ARCH_6M__ 1
71
+ #elif __ARM_ARCH == 7
72
+ #if __ARM_FEATURE_DSP
73
+ #define __ARM_ARCH_7EM__ 1
74
+ #else
75
+ #define __ARM_ARCH_7M__ 1
76
+ #endif
77
+ #endif /* __ARM_ARCH */
78
+ #endif /* __ARM_ARCH_PROFILE == 'M' */
79
+ #endif
80
+
81
+ /* Alternativ core deduction for older ICCARM's */
82
+ #if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \
83
+ !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__)
84
+ #if defined(__ARM6M__) && (__CORE__ == __ARM6M__)
85
+ #define __ARM_ARCH_6M__ 1
86
+ #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__)
87
+ #define __ARM_ARCH_7M__ 1
88
+ #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)
89
+ #define __ARM_ARCH_7EM__ 1
90
+ #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__)
91
+ #define __ARM_ARCH_8M_BASE__ 1
92
+ #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__)
93
+ #define __ARM_ARCH_8M_MAIN__ 1
94
+ #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__)
95
+ #define __ARM_ARCH_8M_MAIN__ 1
96
+ #else
97
+ #error "Unknown target."
98
+ #endif
99
+ #endif
100
+
101
+
102
+
103
+ #if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1
104
+ #define __IAR_M0_FAMILY 1
105
+ #elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1
106
+ #define __IAR_M0_FAMILY 1
107
+ #else
108
+ #define __IAR_M0_FAMILY 0
109
+ #endif
110
+
111
+
112
+ #ifndef __ASM
113
+ #define __ASM __asm
114
+ #endif
115
+
116
+ #ifndef __COMPILER_BARRIER
117
+ #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
118
+ #endif
119
+
120
+ #ifndef __INLINE
121
+ #define __INLINE inline
122
+ #endif
123
+
124
+ #ifndef __NO_RETURN
125
+ #if __ICCARM_V8
126
+ #define __NO_RETURN __attribute__((__noreturn__))
127
+ #else
128
+ #define __NO_RETURN _Pragma("object_attribute=__noreturn")
129
+ #endif
130
+ #endif
131
+
132
+ #ifndef __PACKED
133
+ #if __ICCARM_V8
134
+ #define __PACKED __attribute__((packed, aligned(1)))
135
+ #else
136
+ /* Needs IAR language extensions */
137
+ #define __PACKED __packed
138
+ #endif
139
+ #endif
140
+
141
+ #ifndef __PACKED_STRUCT
142
+ #if __ICCARM_V8
143
+ #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
144
+ #else
145
+ /* Needs IAR language extensions */
146
+ #define __PACKED_STRUCT __packed struct
147
+ #endif
148
+ #endif
149
+
150
+ #ifndef __PACKED_UNION
151
+ #if __ICCARM_V8
152
+ #define __PACKED_UNION union __attribute__((packed, aligned(1)))
153
+ #else
154
+ /* Needs IAR language extensions */
155
+ #define __PACKED_UNION __packed union
156
+ #endif
157
+ #endif
158
+
159
+ #ifndef __RESTRICT
160
+ #if __ICCARM_V8
161
+ #define __RESTRICT __restrict
162
+ #else
163
+ /* Needs IAR language extensions */
164
+ #define __RESTRICT restrict
165
+ #endif
166
+ #endif
167
+
168
+ #ifndef __STATIC_INLINE
169
+ #define __STATIC_INLINE static inline
170
+ #endif
171
+
172
+ #ifndef __FORCEINLINE
173
+ #define __FORCEINLINE _Pragma("inline=forced")
174
+ #endif
175
+
176
+ #ifndef __STATIC_FORCEINLINE
177
+ #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE
178
+ #endif
179
+
180
+ #ifndef __UNALIGNED_UINT16_READ
181
+ #pragma language=save
182
+ #pragma language=extended
183
+ __IAR_FT uint16_t __iar_uint16_read(void const *ptr)
184
+ {
185
+ return *(__packed uint16_t*)(ptr);
186
+ }
187
+ #pragma language=restore
188
+ #define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR)
189
+ #endif
190
+
191
+
192
+ #ifndef __UNALIGNED_UINT16_WRITE
193
+ #pragma language=save
194
+ #pragma language=extended
195
+ __IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
196
+ {
197
+ *(__packed uint16_t*)(ptr) = val;;
198
+ }
199
+ #pragma language=restore
200
+ #define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL)
201
+ #endif
202
+
203
+ #ifndef __UNALIGNED_UINT32_READ
204
+ #pragma language=save
205
+ #pragma language=extended
206
+ __IAR_FT uint32_t __iar_uint32_read(void const *ptr)
207
+ {
208
+ return *(__packed uint32_t*)(ptr);
209
+ }
210
+ #pragma language=restore
211
+ #define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR)
212
+ #endif
213
+
214
+ #ifndef __UNALIGNED_UINT32_WRITE
215
+ #pragma language=save
216
+ #pragma language=extended
217
+ __IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
218
+ {
219
+ *(__packed uint32_t*)(ptr) = val;;
220
+ }
221
+ #pragma language=restore
222
+ #define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL)
223
+ #endif
224
+
225
+ #ifndef __UNALIGNED_UINT32 /* deprecated */
226
+ #pragma language=save
227
+ #pragma language=extended
228
+ __packed struct __iar_u32 { uint32_t v; };
229
+ #pragma language=restore
230
+ #define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v)
231
+ #endif
232
+
233
+ #ifndef __USED
234
+ #if __ICCARM_V8
235
+ #define __USED __attribute__((used))
236
+ #else
237
+ #define __USED _Pragma("__root")
238
+ #endif
239
+ #endif
240
+
241
+ #undef __WEAK /* undo the definition from DLib_Defaults.h */
242
+ #ifndef __WEAK
243
+ #if __ICCARM_V8
244
+ #define __WEAK __attribute__((weak))
245
+ #else
246
+ #define __WEAK _Pragma("__weak")
247
+ #endif
248
+ #endif
249
+
250
+ #ifndef __PROGRAM_START
251
+ #define __PROGRAM_START __iar_program_start
252
+ #endif
253
+
254
+ #ifndef __INITIAL_SP
255
+ #define __INITIAL_SP CSTACK$$Limit
256
+ #endif
257
+
258
+ #ifndef __STACK_LIMIT
259
+ #define __STACK_LIMIT CSTACK$$Base
260
+ #endif
261
+
262
+ #ifndef __VECTOR_TABLE
263
+ #define __VECTOR_TABLE __vector_table
264
+ #endif
265
+
266
+ #ifndef __VECTOR_TABLE_ATTRIBUTE
267
+ #define __VECTOR_TABLE_ATTRIBUTE @".intvec"
268
+ #endif
269
+
270
+ #ifndef __ICCARM_INTRINSICS_VERSION__
271
+ #define __ICCARM_INTRINSICS_VERSION__ 0
272
+ #endif
273
+
274
+ #if __ICCARM_INTRINSICS_VERSION__ == 2
275
+
276
+ #if defined(__CLZ)
277
+ #undef __CLZ
278
+ #endif
279
+ #if defined(__REVSH)
280
+ #undef __REVSH
281
+ #endif
282
+ #if defined(__RBIT)
283
+ #undef __RBIT
284
+ #endif
285
+ #if defined(__SSAT)
286
+ #undef __SSAT
287
+ #endif
288
+ #if defined(__USAT)
289
+ #undef __USAT
290
+ #endif
291
+
292
+ #include "iccarm_builtin.h"
293
+
294
+ #define __disable_fault_irq __iar_builtin_disable_fiq
295
+ #define __disable_irq __iar_builtin_disable_interrupt
296
+ #define __enable_fault_irq __iar_builtin_enable_fiq
297
+ #define __enable_irq __iar_builtin_enable_interrupt
298
+ #define __arm_rsr __iar_builtin_rsr
299
+ #define __arm_wsr __iar_builtin_wsr
300
+
301
+
302
+ #define __get_APSR() (__arm_rsr("APSR"))
303
+ #define __get_BASEPRI() (__arm_rsr("BASEPRI"))
304
+ #define __get_CONTROL() (__arm_rsr("CONTROL"))
305
+ #define __get_FAULTMASK() (__arm_rsr("FAULTMASK"))
306
+
307
+ #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
308
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
309
+ #define __get_FPSCR() (__arm_rsr("FPSCR"))
310
+ #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE)))
311
+ #else
312
+ #define __get_FPSCR() ( 0 )
313
+ #define __set_FPSCR(VALUE) ((void)VALUE)
314
+ #endif
315
+
316
+ #define __get_IPSR() (__arm_rsr("IPSR"))
317
+ #define __get_MSP() (__arm_rsr("MSP"))
318
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
319
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
320
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
321
+ #define __get_MSPLIM() (0U)
322
+ #else
323
+ #define __get_MSPLIM() (__arm_rsr("MSPLIM"))
324
+ #endif
325
+ #define __get_PRIMASK() (__arm_rsr("PRIMASK"))
326
+ #define __get_PSP() (__arm_rsr("PSP"))
327
+
328
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
329
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
330
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
331
+ #define __get_PSPLIM() (0U)
332
+ #else
333
+ #define __get_PSPLIM() (__arm_rsr("PSPLIM"))
334
+ #endif
335
+
336
+ #define __get_xPSR() (__arm_rsr("xPSR"))
337
+
338
+ #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE)))
339
+ #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE)))
340
+ #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE)))
341
+ #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE)))
342
+ #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE)))
343
+
344
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
345
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
346
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
347
+ #define __set_MSPLIM(VALUE) ((void)(VALUE))
348
+ #else
349
+ #define __set_MSPLIM(VALUE) (__arm_wsr("MSPLIM", (VALUE)))
350
+ #endif
351
+ #define __set_PRIMASK(VALUE) (__arm_wsr("PRIMASK", (VALUE)))
352
+ #define __set_PSP(VALUE) (__arm_wsr("PSP", (VALUE)))
353
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
354
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
355
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
356
+ #define __set_PSPLIM(VALUE) ((void)(VALUE))
357
+ #else
358
+ #define __set_PSPLIM(VALUE) (__arm_wsr("PSPLIM", (VALUE)))
359
+ #endif
360
+
361
+ #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS"))
362
+ #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE)))
363
+ #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS"))
364
+ #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE)))
365
+ #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS"))
366
+ #define __TZ_set_MSP_NS(VALUE) (__arm_wsr("MSP_NS", (VALUE)))
367
+ #define __TZ_get_SP_NS() (__arm_rsr("SP_NS"))
368
+ #define __TZ_set_SP_NS(VALUE) (__arm_wsr("SP_NS", (VALUE)))
369
+ #define __TZ_get_PRIMASK_NS() (__arm_rsr("PRIMASK_NS"))
370
+ #define __TZ_set_PRIMASK_NS(VALUE) (__arm_wsr("PRIMASK_NS", (VALUE)))
371
+ #define __TZ_get_BASEPRI_NS() (__arm_rsr("BASEPRI_NS"))
372
+ #define __TZ_set_BASEPRI_NS(VALUE) (__arm_wsr("BASEPRI_NS", (VALUE)))
373
+ #define __TZ_get_FAULTMASK_NS() (__arm_rsr("FAULTMASK_NS"))
374
+ #define __TZ_set_FAULTMASK_NS(VALUE)(__arm_wsr("FAULTMASK_NS", (VALUE)))
375
+
376
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
377
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
378
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
379
+ #define __TZ_get_PSPLIM_NS() (0U)
380
+ #define __TZ_set_PSPLIM_NS(VALUE) ((void)(VALUE))
381
+ #else
382
+ #define __TZ_get_PSPLIM_NS() (__arm_rsr("PSPLIM_NS"))
383
+ #define __TZ_set_PSPLIM_NS(VALUE) (__arm_wsr("PSPLIM_NS", (VALUE)))
384
+ #endif
385
+
386
+ #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS"))
387
+ #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE)))
388
+
389
+ #define __NOP __iar_builtin_no_operation
390
+
391
+ #define __CLZ __iar_builtin_CLZ
392
+ #define __CLREX __iar_builtin_CLREX
393
+
394
+ #define __DMB __iar_builtin_DMB
395
+ #define __DSB __iar_builtin_DSB
396
+ #define __ISB __iar_builtin_ISB
397
+
398
+ #define __LDREXB __iar_builtin_LDREXB
399
+ #define __LDREXH __iar_builtin_LDREXH
400
+ #define __LDREXW __iar_builtin_LDREX
401
+
402
+ #define __RBIT __iar_builtin_RBIT
403
+ #define __REV __iar_builtin_REV
404
+ #define __REV16 __iar_builtin_REV16
405
+
406
+ __IAR_FT int16_t __REVSH(int16_t val)
407
+ {
408
+ return (int16_t) __iar_builtin_REVSH(val);
409
+ }
410
+
411
+ #define __ROR __iar_builtin_ROR
412
+ #define __RRX __iar_builtin_RRX
413
+
414
+ #define __SEV __iar_builtin_SEV
415
+
416
+ #if !__IAR_M0_FAMILY
417
+ #define __SSAT __iar_builtin_SSAT
418
+ #endif
419
+
420
+ #define __STREXB __iar_builtin_STREXB
421
+ #define __STREXH __iar_builtin_STREXH
422
+ #define __STREXW __iar_builtin_STREX
423
+
424
+ #if !__IAR_M0_FAMILY
425
+ #define __USAT __iar_builtin_USAT
426
+ #endif
427
+
428
+ #define __WFE __iar_builtin_WFE
429
+ #define __WFI __iar_builtin_WFI
430
+
431
+ #if __ARM_MEDIA__
432
+ #define __SADD8 __iar_builtin_SADD8
433
+ #define __QADD8 __iar_builtin_QADD8
434
+ #define __SHADD8 __iar_builtin_SHADD8
435
+ #define __UADD8 __iar_builtin_UADD8
436
+ #define __UQADD8 __iar_builtin_UQADD8
437
+ #define __UHADD8 __iar_builtin_UHADD8
438
+ #define __SSUB8 __iar_builtin_SSUB8
439
+ #define __QSUB8 __iar_builtin_QSUB8
440
+ #define __SHSUB8 __iar_builtin_SHSUB8
441
+ #define __USUB8 __iar_builtin_USUB8
442
+ #define __UQSUB8 __iar_builtin_UQSUB8
443
+ #define __UHSUB8 __iar_builtin_UHSUB8
444
+ #define __SADD16 __iar_builtin_SADD16
445
+ #define __QADD16 __iar_builtin_QADD16
446
+ #define __SHADD16 __iar_builtin_SHADD16
447
+ #define __UADD16 __iar_builtin_UADD16
448
+ #define __UQADD16 __iar_builtin_UQADD16
449
+ #define __UHADD16 __iar_builtin_UHADD16
450
+ #define __SSUB16 __iar_builtin_SSUB16
451
+ #define __QSUB16 __iar_builtin_QSUB16
452
+ #define __SHSUB16 __iar_builtin_SHSUB16
453
+ #define __USUB16 __iar_builtin_USUB16
454
+ #define __UQSUB16 __iar_builtin_UQSUB16
455
+ #define __UHSUB16 __iar_builtin_UHSUB16
456
+ #define __SASX __iar_builtin_SASX
457
+ #define __QASX __iar_builtin_QASX
458
+ #define __SHASX __iar_builtin_SHASX
459
+ #define __UASX __iar_builtin_UASX
460
+ #define __UQASX __iar_builtin_UQASX
461
+ #define __UHASX __iar_builtin_UHASX
462
+ #define __SSAX __iar_builtin_SSAX
463
+ #define __QSAX __iar_builtin_QSAX
464
+ #define __SHSAX __iar_builtin_SHSAX
465
+ #define __USAX __iar_builtin_USAX
466
+ #define __UQSAX __iar_builtin_UQSAX
467
+ #define __UHSAX __iar_builtin_UHSAX
468
+ #define __USAD8 __iar_builtin_USAD8
469
+ #define __USADA8 __iar_builtin_USADA8
470
+ #define __SSAT16 __iar_builtin_SSAT16
471
+ #define __USAT16 __iar_builtin_USAT16
472
+ #define __UXTB16 __iar_builtin_UXTB16
473
+ #define __UXTAB16 __iar_builtin_UXTAB16
474
+ #define __SXTB16 __iar_builtin_SXTB16
475
+ #define __SXTAB16 __iar_builtin_SXTAB16
476
+ #define __SMUAD __iar_builtin_SMUAD
477
+ #define __SMUADX __iar_builtin_SMUADX
478
+ #define __SMMLA __iar_builtin_SMMLA
479
+ #define __SMLAD __iar_builtin_SMLAD
480
+ #define __SMLADX __iar_builtin_SMLADX
481
+ #define __SMLALD __iar_builtin_SMLALD
482
+ #define __SMLALDX __iar_builtin_SMLALDX
483
+ #define __SMUSD __iar_builtin_SMUSD
484
+ #define __SMUSDX __iar_builtin_SMUSDX
485
+ #define __SMLSD __iar_builtin_SMLSD
486
+ #define __SMLSDX __iar_builtin_SMLSDX
487
+ #define __SMLSLD __iar_builtin_SMLSLD
488
+ #define __SMLSLDX __iar_builtin_SMLSLDX
489
+ #define __SEL __iar_builtin_SEL
490
+ #define __QADD __iar_builtin_QADD
491
+ #define __QSUB __iar_builtin_QSUB
492
+ #define __PKHBT __iar_builtin_PKHBT
493
+ #define __PKHTB __iar_builtin_PKHTB
494
+ #endif
495
+
496
+ #else /* __ICCARM_INTRINSICS_VERSION__ == 2 */
497
+
498
+ #if __IAR_M0_FAMILY
499
+ /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */
500
+ #define __CLZ __cmsis_iar_clz_not_active
501
+ #define __SSAT __cmsis_iar_ssat_not_active
502
+ #define __USAT __cmsis_iar_usat_not_active
503
+ #define __RBIT __cmsis_iar_rbit_not_active
504
+ #define __get_APSR __cmsis_iar_get_APSR_not_active
505
+ #endif
506
+
507
+
508
+ #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
509
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) ))
510
+ #define __get_FPSCR __cmsis_iar_get_FPSR_not_active
511
+ #define __set_FPSCR __cmsis_iar_set_FPSR_not_active
512
+ #endif
513
+
514
+ #ifdef __INTRINSICS_INCLUDED
515
+ #error intrinsics.h is already included previously!
516
+ #endif
517
+
518
+ #include <intrinsics.h>
519
+
520
+ #if __IAR_M0_FAMILY
521
+ /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */
522
+ #undef __CLZ
523
+ #undef __SSAT
524
+ #undef __USAT
525
+ #undef __RBIT
526
+ #undef __get_APSR
527
+
528
+ __STATIC_INLINE uint8_t __CLZ(uint32_t data)
529
+ {
530
+ if (data == 0U) { return 32U; }
531
+
532
+ uint32_t count = 0U;
533
+ uint32_t mask = 0x80000000U;
534
+
535
+ while ((data & mask) == 0U)
536
+ {
537
+ count += 1U;
538
+ mask = mask >> 1U;
539
+ }
540
+ return count;
541
+ }
542
+
543
+ __STATIC_INLINE uint32_t __RBIT(uint32_t v)
544
+ {
545
+ uint8_t sc = 31U;
546
+ uint32_t r = v;
547
+ for (v >>= 1U; v; v >>= 1U)
548
+ {
549
+ r <<= 1U;
550
+ r |= v & 1U;
551
+ sc--;
552
+ }
553
+ return (r << sc);
554
+ }
555
+
556
+ __STATIC_INLINE uint32_t __get_APSR(void)
557
+ {
558
+ uint32_t res;
559
+ __asm("MRS %0,APSR" : "=r" (res));
560
+ return res;
561
+ }
562
+
563
+ #endif
564
+
565
+ #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
566
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) ))
567
+ #undef __get_FPSCR
568
+ #undef __set_FPSCR
569
+ #define __get_FPSCR() (0)
570
+ #define __set_FPSCR(VALUE) ((void)VALUE)
571
+ #endif
572
+
573
+ #pragma diag_suppress=Pe940
574
+ #pragma diag_suppress=Pe177
575
+
576
+ #define __enable_irq __enable_interrupt
577
+ #define __disable_irq __disable_interrupt
578
+ #define __NOP __no_operation
579
+
580
+ #define __get_xPSR __get_PSR
581
+
582
+ #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0)
583
+
584
+ __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
585
+ {
586
+ return __LDREX((unsigned long *)ptr);
587
+ }
588
+
589
+ __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
590
+ {
591
+ return __STREX(value, (unsigned long *)ptr);
592
+ }
593
+ #endif
594
+
595
+
596
+ /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */
597
+ #if (__CORTEX_M >= 0x03)
598
+
599
+ __IAR_FT uint32_t __RRX(uint32_t value)
600
+ {
601
+ uint32_t result;
602
+ __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value));
603
+ return(result);
604
+ }
605
+
606
+ __IAR_FT void __set_BASEPRI_MAX(uint32_t value)
607
+ {
608
+ __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value));
609
+ }
610
+
611
+
612
+ #define __enable_fault_irq __enable_fiq
613
+ #define __disable_fault_irq __disable_fiq
614
+
615
+
616
+ #endif /* (__CORTEX_M >= 0x03) */
617
+
618
+ __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
619
+ {
620
+ return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2));
621
+ }
622
+
623
+ #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
624
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
625
+
626
+ __IAR_FT uint32_t __get_MSPLIM(void)
627
+ {
628
+ uint32_t res;
629
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
630
+ (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
631
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
632
+ res = 0U;
633
+ #else
634
+ __asm volatile("MRS %0,MSPLIM" : "=r" (res));
635
+ #endif
636
+ return res;
637
+ }
638
+
639
+ __IAR_FT void __set_MSPLIM(uint32_t value)
640
+ {
641
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
642
+ (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
643
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
644
+ (void)value;
645
+ #else
646
+ __asm volatile("MSR MSPLIM,%0" :: "r" (value));
647
+ #endif
648
+ }
649
+
650
+ __IAR_FT uint32_t __get_PSPLIM(void)
651
+ {
652
+ uint32_t res;
653
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
654
+ (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
655
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
656
+ res = 0U;
657
+ #else
658
+ __asm volatile("MRS %0,PSPLIM" : "=r" (res));
659
+ #endif
660
+ return res;
661
+ }
662
+
663
+ __IAR_FT void __set_PSPLIM(uint32_t value)
664
+ {
665
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
666
+ (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
667
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
668
+ (void)value;
669
+ #else
670
+ __asm volatile("MSR PSPLIM,%0" :: "r" (value));
671
+ #endif
672
+ }
673
+
674
+ __IAR_FT uint32_t __TZ_get_CONTROL_NS(void)
675
+ {
676
+ uint32_t res;
677
+ __asm volatile("MRS %0,CONTROL_NS" : "=r" (res));
678
+ return res;
679
+ }
680
+
681
+ __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value)
682
+ {
683
+ __asm volatile("MSR CONTROL_NS,%0" :: "r" (value));
684
+ }
685
+
686
+ __IAR_FT uint32_t __TZ_get_PSP_NS(void)
687
+ {
688
+ uint32_t res;
689
+ __asm volatile("MRS %0,PSP_NS" : "=r" (res));
690
+ return res;
691
+ }
692
+
693
+ __IAR_FT void __TZ_set_PSP_NS(uint32_t value)
694
+ {
695
+ __asm volatile("MSR PSP_NS,%0" :: "r" (value));
696
+ }
697
+
698
+ __IAR_FT uint32_t __TZ_get_MSP_NS(void)
699
+ {
700
+ uint32_t res;
701
+ __asm volatile("MRS %0,MSP_NS" : "=r" (res));
702
+ return res;
703
+ }
704
+
705
+ __IAR_FT void __TZ_set_MSP_NS(uint32_t value)
706
+ {
707
+ __asm volatile("MSR MSP_NS,%0" :: "r" (value));
708
+ }
709
+
710
+ __IAR_FT uint32_t __TZ_get_SP_NS(void)
711
+ {
712
+ uint32_t res;
713
+ __asm volatile("MRS %0,SP_NS" : "=r" (res));
714
+ return res;
715
+ }
716
+ __IAR_FT void __TZ_set_SP_NS(uint32_t value)
717
+ {
718
+ __asm volatile("MSR SP_NS,%0" :: "r" (value));
719
+ }
720
+
721
+ __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void)
722
+ {
723
+ uint32_t res;
724
+ __asm volatile("MRS %0,PRIMASK_NS" : "=r" (res));
725
+ return res;
726
+ }
727
+
728
+ __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value)
729
+ {
730
+ __asm volatile("MSR PRIMASK_NS,%0" :: "r" (value));
731
+ }
732
+
733
+ __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void)
734
+ {
735
+ uint32_t res;
736
+ __asm volatile("MRS %0,BASEPRI_NS" : "=r" (res));
737
+ return res;
738
+ }
739
+
740
+ __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value)
741
+ {
742
+ __asm volatile("MSR BASEPRI_NS,%0" :: "r" (value));
743
+ }
744
+
745
+ __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void)
746
+ {
747
+ uint32_t res;
748
+ __asm volatile("MRS %0,FAULTMASK_NS" : "=r" (res));
749
+ return res;
750
+ }
751
+
752
+ __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value)
753
+ {
754
+ __asm volatile("MSR FAULTMASK_NS,%0" :: "r" (value));
755
+ }
756
+
757
+ __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void)
758
+ {
759
+ uint32_t res;
760
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
761
+ (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
762
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
763
+ res = 0U;
764
+ #else
765
+ __asm volatile("MRS %0,PSPLIM_NS" : "=r" (res));
766
+ #endif
767
+ return res;
768
+ }
769
+
770
+ __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value)
771
+ {
772
+ #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
773
+ (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
774
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
775
+ (void)value;
776
+ #else
777
+ __asm volatile("MSR PSPLIM_NS,%0" :: "r" (value));
778
+ #endif
779
+ }
780
+
781
+ __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void)
782
+ {
783
+ uint32_t res;
784
+ __asm volatile("MRS %0,MSPLIM_NS" : "=r" (res));
785
+ return res;
786
+ }
787
+
788
+ __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value)
789
+ {
790
+ __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value));
791
+ }
792
+
793
+ #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */
794
+
795
+ #endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */
796
+
797
+ #define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value))
798
+
799
+ #if __IAR_M0_FAMILY
800
+ __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat)
801
+ {
802
+ if ((sat >= 1U) && (sat <= 32U))
803
+ {
804
+ const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
805
+ const int32_t min = -1 - max ;
806
+ if (val > max)
807
+ {
808
+ return max;
809
+ }
810
+ else if (val < min)
811
+ {
812
+ return min;
813
+ }
814
+ }
815
+ return val;
816
+ }
817
+
818
+ __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat)
819
+ {
820
+ if (sat <= 31U)
821
+ {
822
+ const uint32_t max = ((1U << sat) - 1U);
823
+ if (val > (int32_t)max)
824
+ {
825
+ return max;
826
+ }
827
+ else if (val < 0)
828
+ {
829
+ return 0U;
830
+ }
831
+ }
832
+ return (uint32_t)val;
833
+ }
834
+ #endif
835
+
836
+ #if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */
837
+
838
+ __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr)
839
+ {
840
+ uint32_t res;
841
+ __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
842
+ return ((uint8_t)res);
843
+ }
844
+
845
+ __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr)
846
+ {
847
+ uint32_t res;
848
+ __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
849
+ return ((uint16_t)res);
850
+ }
851
+
852
+ __IAR_FT uint32_t __LDRT(volatile uint32_t *addr)
853
+ {
854
+ uint32_t res;
855
+ __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
856
+ return res;
857
+ }
858
+
859
+ __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr)
860
+ {
861
+ __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
862
+ }
863
+
864
+ __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr)
865
+ {
866
+ __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
867
+ }
868
+
869
+ __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr)
870
+ {
871
+ __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory");
872
+ }
873
+
874
+ #endif /* (__CORTEX_M >= 0x03) */
875
+
876
+ #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
877
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
878
+
879
+
880
+ __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr)
881
+ {
882
+ uint32_t res;
883
+ __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
884
+ return ((uint8_t)res);
885
+ }
886
+
887
+ __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr)
888
+ {
889
+ uint32_t res;
890
+ __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
891
+ return ((uint16_t)res);
892
+ }
893
+
894
+ __IAR_FT uint32_t __LDA(volatile uint32_t *ptr)
895
+ {
896
+ uint32_t res;
897
+ __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
898
+ return res;
899
+ }
900
+
901
+ __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr)
902
+ {
903
+ __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
904
+ }
905
+
906
+ __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr)
907
+ {
908
+ __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
909
+ }
910
+
911
+ __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr)
912
+ {
913
+ __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
914
+ }
915
+
916
+ __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr)
917
+ {
918
+ uint32_t res;
919
+ __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
920
+ return ((uint8_t)res);
921
+ }
922
+
923
+ __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr)
924
+ {
925
+ uint32_t res;
926
+ __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
927
+ return ((uint16_t)res);
928
+ }
929
+
930
+ __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr)
931
+ {
932
+ uint32_t res;
933
+ __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
934
+ return res;
935
+ }
936
+
937
+ __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
938
+ {
939
+ uint32_t res;
940
+ __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory");
941
+ return res;
942
+ }
943
+
944
+ __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
945
+ {
946
+ uint32_t res;
947
+ __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory");
948
+ return res;
949
+ }
950
+
951
+ __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
952
+ {
953
+ uint32_t res;
954
+ __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory");
955
+ return res;
956
+ }
957
+
958
+ #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */
959
+
960
+ #undef __IAR_FT
961
+ #undef __IAR_M0_FAMILY
962
+ #undef __ICCARM_V8
963
+
964
+ #pragma diag_default=Pe940
965
+ #pragma diag_default=Pe177
966
+
967
+ #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
968
+
969
+ #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
970
+
971
+ #endif /* __CMSIS_ICCARM_H__ */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/cmsis_version.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file cmsis_version.h
3
+ * @brief CMSIS Core(M) Version definitions
4
+ * @version V5.0.4
5
+ * @date 23. July 2019
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2019 ARM Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef __CMSIS_VERSION_H
32
+ #define __CMSIS_VERSION_H
33
+
34
+ /* CMSIS Version definitions */
35
+ #define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */
36
+ #define __CM_CMSIS_VERSION_SUB ( 4U) /*!< [15:0] CMSIS Core(M) sub version */
37
+ #define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \
38
+ __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */
39
+ #endif
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_armv81mml.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mbl.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mml.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm0.h ADDED
@@ -0,0 +1,952 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file core_cm0.h
3
+ * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File
4
+ * @version V5.0.8
5
+ * @date 21. August 2019
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef __CORE_CM0_H_GENERIC
32
+ #define __CORE_CM0_H_GENERIC
33
+
34
+ #include <stdint.h>
35
+
36
+ #ifdef __cplusplus
37
+ extern "C" {
38
+ #endif
39
+
40
+ /**
41
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
42
+ CMSIS violates the following MISRA-C:2004 rules:
43
+
44
+ \li Required Rule 8.5, object/function definition in header file.<br>
45
+ Function definitions in header files are used to allow 'inlining'.
46
+
47
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
48
+ Unions are used for effective representation of core registers.
49
+
50
+ \li Advisory Rule 19.7, Function-like macro defined.<br>
51
+ Function-like macros are used to allow more efficient code.
52
+ */
53
+
54
+
55
+ /*******************************************************************************
56
+ * CMSIS definitions
57
+ ******************************************************************************/
58
+ /**
59
+ \ingroup Cortex_M0
60
+ @{
61
+ */
62
+
63
+ #include "cmsis_version.h"
64
+
65
+ /* CMSIS CM0 definitions */
66
+ #define __CM0_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */
67
+ #define __CM0_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */
68
+ #define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \
69
+ __CM0_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */
70
+
71
+ #define __CORTEX_M (0U) /*!< Cortex-M Core */
72
+
73
+ /** __FPU_USED indicates whether an FPU is used or not.
74
+ This core does not support an FPU at all
75
+ */
76
+ #define __FPU_USED 0U
77
+
78
+ #if defined ( __CC_ARM )
79
+ #if defined __TARGET_FPU_VFP
80
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
81
+ #endif
82
+
83
+ #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
84
+ #if defined __ARM_FP
85
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
86
+ #endif
87
+
88
+ #elif defined ( __GNUC__ )
89
+ #if defined (__VFP_FP__) && !defined(__SOFTFP__)
90
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
91
+ #endif
92
+
93
+ #elif defined ( __ICCARM__ )
94
+ #if defined __ARMVFP__
95
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
96
+ #endif
97
+
98
+ #elif defined ( __TI_ARM__ )
99
+ #if defined __TI_VFP_SUPPORT__
100
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
101
+ #endif
102
+
103
+ #elif defined ( __TASKING__ )
104
+ #if defined __FPU_VFP__
105
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
106
+ #endif
107
+
108
+ #elif defined ( __CSMC__ )
109
+ #if ( __CSMC__ & 0x400U)
110
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
111
+ #endif
112
+
113
+ #endif
114
+
115
+ #include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" /* CMSIS compiler specific defines */
116
+
117
+
118
+ #ifdef __cplusplus
119
+ }
120
+ #endif
121
+
122
+ #endif /* __CORE_CM0_H_GENERIC */
123
+
124
+ #ifndef __CMSIS_GENERIC
125
+
126
+ #ifndef __CORE_CM0_H_DEPENDANT
127
+ #define __CORE_CM0_H_DEPENDANT
128
+
129
+ #ifdef __cplusplus
130
+ extern "C" {
131
+ #endif
132
+
133
+ /* check device defines and use defaults */
134
+ #if defined __CHECK_DEVICE_DEFINES
135
+ #ifndef __CM0_REV
136
+ #define __CM0_REV 0x0000U
137
+ #warning "__CM0_REV not defined in device header file; using default!"
138
+ #endif
139
+
140
+ #ifndef __NVIC_PRIO_BITS
141
+ #define __NVIC_PRIO_BITS 2U
142
+ #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
143
+ #endif
144
+
145
+ #ifndef __Vendor_SysTickConfig
146
+ #define __Vendor_SysTickConfig 0U
147
+ #warning "__Vendor_SysTickConfig not defined in device header file; using default!"
148
+ #endif
149
+ #endif
150
+
151
+ /* IO definitions (access restrictions to peripheral registers) */
152
+ /**
153
+ \defgroup CMSIS_glob_defs CMSIS Global Defines
154
+
155
+ <strong>IO Type Qualifiers</strong> are used
156
+ \li to specify the access to peripheral variables.
157
+ \li for automatic generation of peripheral register debug information.
158
+ */
159
+ #ifdef __cplusplus
160
+ #define __I volatile /*!< Defines 'read only' permissions */
161
+ #else
162
+ #define __I volatile const /*!< Defines 'read only' permissions */
163
+ #endif
164
+ #define __O volatile /*!< Defines 'write only' permissions */
165
+ #define __IO volatile /*!< Defines 'read / write' permissions */
166
+
167
+ /* following defines should be used for structure members */
168
+ #define __IM volatile const /*! Defines 'read only' structure member permissions */
169
+ #define __OM volatile /*! Defines 'write only' structure member permissions */
170
+ #define __IOM volatile /*! Defines 'read / write' structure member permissions */
171
+
172
+ /*@} end of group Cortex_M0 */
173
+
174
+
175
+
176
+ /*******************************************************************************
177
+ * Register Abstraction
178
+ Core Register contain:
179
+ - Core Register
180
+ - Core NVIC Register
181
+ - Core SCB Register
182
+ - Core SysTick Register
183
+ ******************************************************************************/
184
+ /**
185
+ \defgroup CMSIS_core_register Defines and Type Definitions
186
+ \brief Type definitions and defines for Cortex-M processor based devices.
187
+ */
188
+
189
+ /**
190
+ \ingroup CMSIS_core_register
191
+ \defgroup CMSIS_CORE Status and Control Registers
192
+ \brief Core Register type definitions.
193
+ @{
194
+ */
195
+
196
+ /**
197
+ \brief Union type to access the Application Program Status Register (APSR).
198
+ */
199
+ typedef union
200
+ {
201
+ struct
202
+ {
203
+ uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */
204
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
205
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
206
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
207
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
208
+ } b; /*!< Structure used for bit access */
209
+ uint32_t w; /*!< Type used for word access */
210
+ } APSR_Type;
211
+
212
+ /* APSR Register Definitions */
213
+ #define APSR_N_Pos 31U /*!< APSR: N Position */
214
+ #define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
215
+
216
+ #define APSR_Z_Pos 30U /*!< APSR: Z Position */
217
+ #define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
218
+
219
+ #define APSR_C_Pos 29U /*!< APSR: C Position */
220
+ #define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
221
+
222
+ #define APSR_V_Pos 28U /*!< APSR: V Position */
223
+ #define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
224
+
225
+
226
+ /**
227
+ \brief Union type to access the Interrupt Program Status Register (IPSR).
228
+ */
229
+ typedef union
230
+ {
231
+ struct
232
+ {
233
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
234
+ uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */
235
+ } b; /*!< Structure used for bit access */
236
+ uint32_t w; /*!< Type used for word access */
237
+ } IPSR_Type;
238
+
239
+ /* IPSR Register Definitions */
240
+ #define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
241
+ #define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
242
+
243
+
244
+ /**
245
+ \brief Union type to access the Special-Purpose Program Status Registers (xPSR).
246
+ */
247
+ typedef union
248
+ {
249
+ struct
250
+ {
251
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
252
+ uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */
253
+ uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */
254
+ uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */
255
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
256
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
257
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
258
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
259
+ } b; /*!< Structure used for bit access */
260
+ uint32_t w; /*!< Type used for word access */
261
+ } xPSR_Type;
262
+
263
+ /* xPSR Register Definitions */
264
+ #define xPSR_N_Pos 31U /*!< xPSR: N Position */
265
+ #define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
266
+
267
+ #define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
268
+ #define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
269
+
270
+ #define xPSR_C_Pos 29U /*!< xPSR: C Position */
271
+ #define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
272
+
273
+ #define xPSR_V_Pos 28U /*!< xPSR: V Position */
274
+ #define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
275
+
276
+ #define xPSR_T_Pos 24U /*!< xPSR: T Position */
277
+ #define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
278
+
279
+ #define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
280
+ #define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
281
+
282
+
283
+ /**
284
+ \brief Union type to access the Control Registers (CONTROL).
285
+ */
286
+ typedef union
287
+ {
288
+ struct
289
+ {
290
+ uint32_t _reserved0:1; /*!< bit: 0 Reserved */
291
+ uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */
292
+ uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */
293
+ } b; /*!< Structure used for bit access */
294
+ uint32_t w; /*!< Type used for word access */
295
+ } CONTROL_Type;
296
+
297
+ /* CONTROL Register Definitions */
298
+ #define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
299
+ #define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
300
+
301
+ /*@} end of group CMSIS_CORE */
302
+
303
+
304
+ /**
305
+ \ingroup CMSIS_core_register
306
+ \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
307
+ \brief Type definitions for the NVIC Registers
308
+ @{
309
+ */
310
+
311
+ /**
312
+ \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
313
+ */
314
+ typedef struct
315
+ {
316
+ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
317
+ uint32_t RESERVED0[31U];
318
+ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
319
+ uint32_t RESERVED1[31U];
320
+ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
321
+ uint32_t RESERVED2[31U];
322
+ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
323
+ uint32_t RESERVED3[31U];
324
+ uint32_t RESERVED4[64U];
325
+ __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
326
+ } NVIC_Type;
327
+
328
+ /*@} end of group CMSIS_NVIC */
329
+
330
+
331
+ /**
332
+ \ingroup CMSIS_core_register
333
+ \defgroup CMSIS_SCB System Control Block (SCB)
334
+ \brief Type definitions for the System Control Block Registers
335
+ @{
336
+ */
337
+
338
+ /**
339
+ \brief Structure type to access the System Control Block (SCB).
340
+ */
341
+ typedef struct
342
+ {
343
+ __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
344
+ __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
345
+ uint32_t RESERVED0;
346
+ __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
347
+ __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
348
+ __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
349
+ uint32_t RESERVED1;
350
+ __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
351
+ __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
352
+ } SCB_Type;
353
+
354
+ /* SCB CPUID Register Definitions */
355
+ #define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
356
+ #define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
357
+
358
+ #define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
359
+ #define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
360
+
361
+ #define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
362
+ #define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
363
+
364
+ #define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
365
+ #define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
366
+
367
+ #define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
368
+ #define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
369
+
370
+ /* SCB Interrupt Control State Register Definitions */
371
+ #define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
372
+ #define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
373
+
374
+ #define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
375
+ #define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
376
+
377
+ #define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
378
+ #define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
379
+
380
+ #define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
381
+ #define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
382
+
383
+ #define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
384
+ #define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
385
+
386
+ #define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
387
+ #define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
388
+
389
+ #define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
390
+ #define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
391
+
392
+ #define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
393
+ #define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
394
+
395
+ #define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
396
+ #define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
397
+
398
+ /* SCB Application Interrupt and Reset Control Register Definitions */
399
+ #define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
400
+ #define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
401
+
402
+ #define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
403
+ #define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
404
+
405
+ #define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
406
+ #define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
407
+
408
+ #define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
409
+ #define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
410
+
411
+ #define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
412
+ #define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
413
+
414
+ /* SCB System Control Register Definitions */
415
+ #define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
416
+ #define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
417
+
418
+ #define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
419
+ #define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
420
+
421
+ #define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
422
+ #define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
423
+
424
+ /* SCB Configuration Control Register Definitions */
425
+ #define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
426
+ #define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
427
+
428
+ #define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
429
+ #define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
430
+
431
+ /* SCB System Handler Control and State Register Definitions */
432
+ #define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
433
+ #define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
434
+
435
+ /*@} end of group CMSIS_SCB */
436
+
437
+
438
+ /**
439
+ \ingroup CMSIS_core_register
440
+ \defgroup CMSIS_SysTick System Tick Timer (SysTick)
441
+ \brief Type definitions for the System Timer Registers.
442
+ @{
443
+ */
444
+
445
+ /**
446
+ \brief Structure type to access the System Timer (SysTick).
447
+ */
448
+ typedef struct
449
+ {
450
+ __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
451
+ __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
452
+ __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
453
+ __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
454
+ } SysTick_Type;
455
+
456
+ /* SysTick Control / Status Register Definitions */
457
+ #define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
458
+ #define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
459
+
460
+ #define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
461
+ #define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
462
+
463
+ #define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
464
+ #define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
465
+
466
+ #define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
467
+ #define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
468
+
469
+ /* SysTick Reload Register Definitions */
470
+ #define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
471
+ #define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
472
+
473
+ /* SysTick Current Register Definitions */
474
+ #define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
475
+ #define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
476
+
477
+ /* SysTick Calibration Register Definitions */
478
+ #define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
479
+ #define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
480
+
481
+ #define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
482
+ #define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
483
+
484
+ #define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
485
+ #define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
486
+
487
+ /*@} end of group CMSIS_SysTick */
488
+
489
+
490
+ /**
491
+ \ingroup CMSIS_core_register
492
+ \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
493
+ \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor.
494
+ Therefore they are not covered by the Cortex-M0 header file.
495
+ @{
496
+ */
497
+ /*@} end of group CMSIS_CoreDebug */
498
+
499
+
500
+ /**
501
+ \ingroup CMSIS_core_register
502
+ \defgroup CMSIS_core_bitfield Core register bit field macros
503
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
504
+ @{
505
+ */
506
+
507
+ /**
508
+ \brief Mask and shift a bit field value for use in a register bit range.
509
+ \param[in] field Name of the register bit field.
510
+ \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type.
511
+ \return Masked and shifted value.
512
+ */
513
+ #define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
514
+
515
+ /**
516
+ \brief Mask and shift a register value to extract a bit filed value.
517
+ \param[in] field Name of the register bit field.
518
+ \param[in] value Value of register. This parameter is interpreted as an uint32_t type.
519
+ \return Masked and shifted bit field value.
520
+ */
521
+ #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
522
+
523
+ /*@} end of group CMSIS_core_bitfield */
524
+
525
+
526
+ /**
527
+ \ingroup CMSIS_core_register
528
+ \defgroup CMSIS_core_base Core Definitions
529
+ \brief Definitions for base addresses, unions, and structures.
530
+ @{
531
+ */
532
+
533
+ /* Memory mapping of Core Hardware */
534
+ #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
535
+ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
536
+ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
537
+ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
538
+
539
+ #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
540
+ #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
541
+ #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
542
+
543
+
544
+ /*@} */
545
+
546
+
547
+
548
+ /*******************************************************************************
549
+ * Hardware Abstraction Layer
550
+ Core Function Interface contains:
551
+ - Core NVIC Functions
552
+ - Core SysTick Functions
553
+ - Core Register Access Functions
554
+ ******************************************************************************/
555
+ /**
556
+ \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
557
+ */
558
+
559
+
560
+
561
+ /* ########################## NVIC functions #################################### */
562
+ /**
563
+ \ingroup CMSIS_Core_FunctionInterface
564
+ \defgroup CMSIS_Core_NVICFunctions NVIC Functions
565
+ \brief Functions that manage interrupts and exceptions via the NVIC.
566
+ @{
567
+ */
568
+
569
+ #ifdef CMSIS_NVIC_VIRTUAL
570
+ #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
571
+ #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
572
+ #endif
573
+ #include CMSIS_NVIC_VIRTUAL_HEADER_FILE
574
+ #else
575
+ #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping
576
+ #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping
577
+ #define NVIC_EnableIRQ __NVIC_EnableIRQ
578
+ #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ
579
+ #define NVIC_DisableIRQ __NVIC_DisableIRQ
580
+ #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ
581
+ #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ
582
+ #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ
583
+ /*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0 */
584
+ #define NVIC_SetPriority __NVIC_SetPriority
585
+ #define NVIC_GetPriority __NVIC_GetPriority
586
+ #define NVIC_SystemReset __NVIC_SystemReset
587
+ #endif /* CMSIS_NVIC_VIRTUAL */
588
+
589
+ #ifdef CMSIS_VECTAB_VIRTUAL
590
+ #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
591
+ #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
592
+ #endif
593
+ #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
594
+ #else
595
+ #define NVIC_SetVector __NVIC_SetVector
596
+ #define NVIC_GetVector __NVIC_GetVector
597
+ #endif /* (CMSIS_VECTAB_VIRTUAL) */
598
+
599
+ #define NVIC_USER_IRQ_OFFSET 16
600
+
601
+
602
+ /* The following EXC_RETURN values are saved the LR on exception entry */
603
+ #define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */
604
+ #define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */
605
+ #define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */
606
+
607
+
608
+ /* Interrupt Priorities are WORD accessible only under Armv6-M */
609
+ /* The following MACROS handle generation of the register offset and byte masks */
610
+ #define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
611
+ #define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
612
+ #define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
613
+
614
+ #define __NVIC_SetPriorityGrouping(X) (void)(X)
615
+ #define __NVIC_GetPriorityGrouping() (0U)
616
+
617
+ /**
618
+ \brief Enable Interrupt
619
+ \details Enables a device specific interrupt in the NVIC interrupt controller.
620
+ \param [in] IRQn Device specific interrupt number.
621
+ \note IRQn must not be negative.
622
+ */
623
+ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
624
+ {
625
+ if ((int32_t)(IRQn) >= 0)
626
+ {
627
+ __COMPILER_BARRIER();
628
+ NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
629
+ __COMPILER_BARRIER();
630
+ }
631
+ }
632
+
633
+
634
+ /**
635
+ \brief Get Interrupt Enable status
636
+ \details Returns a device specific interrupt enable status from the NVIC interrupt controller.
637
+ \param [in] IRQn Device specific interrupt number.
638
+ \return 0 Interrupt is not enabled.
639
+ \return 1 Interrupt is enabled.
640
+ \note IRQn must not be negative.
641
+ */
642
+ __STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
643
+ {
644
+ if ((int32_t)(IRQn) >= 0)
645
+ {
646
+ return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
647
+ }
648
+ else
649
+ {
650
+ return(0U);
651
+ }
652
+ }
653
+
654
+
655
+ /**
656
+ \brief Disable Interrupt
657
+ \details Disables a device specific interrupt in the NVIC interrupt controller.
658
+ \param [in] IRQn Device specific interrupt number.
659
+ \note IRQn must not be negative.
660
+ */
661
+ __STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
662
+ {
663
+ if ((int32_t)(IRQn) >= 0)
664
+ {
665
+ NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
666
+ __DSB();
667
+ __ISB();
668
+ }
669
+ }
670
+
671
+
672
+ /**
673
+ \brief Get Pending Interrupt
674
+ \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
675
+ \param [in] IRQn Device specific interrupt number.
676
+ \return 0 Interrupt status is not pending.
677
+ \return 1 Interrupt status is pending.
678
+ \note IRQn must not be negative.
679
+ */
680
+ __STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
681
+ {
682
+ if ((int32_t)(IRQn) >= 0)
683
+ {
684
+ return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
685
+ }
686
+ else
687
+ {
688
+ return(0U);
689
+ }
690
+ }
691
+
692
+
693
+ /**
694
+ \brief Set Pending Interrupt
695
+ \details Sets the pending bit of a device specific interrupt in the NVIC pending register.
696
+ \param [in] IRQn Device specific interrupt number.
697
+ \note IRQn must not be negative.
698
+ */
699
+ __STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
700
+ {
701
+ if ((int32_t)(IRQn) >= 0)
702
+ {
703
+ NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
704
+ }
705
+ }
706
+
707
+
708
+ /**
709
+ \brief Clear Pending Interrupt
710
+ \details Clears the pending bit of a device specific interrupt in the NVIC pending register.
711
+ \param [in] IRQn Device specific interrupt number.
712
+ \note IRQn must not be negative.
713
+ */
714
+ __STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
715
+ {
716
+ if ((int32_t)(IRQn) >= 0)
717
+ {
718
+ NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
719
+ }
720
+ }
721
+
722
+
723
+ /**
724
+ \brief Set Interrupt Priority
725
+ \details Sets the priority of a device specific interrupt or a processor exception.
726
+ The interrupt number can be positive to specify a device specific interrupt,
727
+ or negative to specify a processor exception.
728
+ \param [in] IRQn Interrupt number.
729
+ \param [in] priority Priority to set.
730
+ \note The priority cannot be set for every processor exception.
731
+ */
732
+ __STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
733
+ {
734
+ if ((int32_t)(IRQn) >= 0)
735
+ {
736
+ NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
737
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
738
+ }
739
+ else
740
+ {
741
+ SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
742
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
743
+ }
744
+ }
745
+
746
+
747
+ /**
748
+ \brief Get Interrupt Priority
749
+ \details Reads the priority of a device specific interrupt or a processor exception.
750
+ The interrupt number can be positive to specify a device specific interrupt,
751
+ or negative to specify a processor exception.
752
+ \param [in] IRQn Interrupt number.
753
+ \return Interrupt Priority.
754
+ Value is aligned automatically to the implemented priority bits of the microcontroller.
755
+ */
756
+ __STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
757
+ {
758
+
759
+ if ((int32_t)(IRQn) >= 0)
760
+ {
761
+ return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
762
+ }
763
+ else
764
+ {
765
+ return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
766
+ }
767
+ }
768
+
769
+
770
+ /**
771
+ \brief Encode Priority
772
+ \details Encodes the priority for an interrupt with the given priority group,
773
+ preemptive priority value, and subpriority value.
774
+ In case of a conflict between priority grouping and available
775
+ priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
776
+ \param [in] PriorityGroup Used priority group.
777
+ \param [in] PreemptPriority Preemptive priority value (starting from 0).
778
+ \param [in] SubPriority Subpriority value (starting from 0).
779
+ \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
780
+ */
781
+ __STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
782
+ {
783
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
784
+ uint32_t PreemptPriorityBits;
785
+ uint32_t SubPriorityBits;
786
+
787
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
788
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
789
+
790
+ return (
791
+ ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
792
+ ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
793
+ );
794
+ }
795
+
796
+
797
+ /**
798
+ \brief Decode Priority
799
+ \details Decodes an interrupt priority value with a given priority group to
800
+ preemptive priority value and subpriority value.
801
+ In case of a conflict between priority grouping and available
802
+ priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
803
+ \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
804
+ \param [in] PriorityGroup Used priority group.
805
+ \param [out] pPreemptPriority Preemptive priority value (starting from 0).
806
+ \param [out] pSubPriority Subpriority value (starting from 0).
807
+ */
808
+ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
809
+ {
810
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
811
+ uint32_t PreemptPriorityBits;
812
+ uint32_t SubPriorityBits;
813
+
814
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
815
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
816
+
817
+ *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
818
+ *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
819
+ }
820
+
821
+
822
+
823
+ /**
824
+ \brief Set Interrupt Vector
825
+ \details Sets an interrupt vector in SRAM based interrupt vector table.
826
+ The interrupt number can be positive to specify a device specific interrupt,
827
+ or negative to specify a processor exception.
828
+ Address 0 must be mapped to SRAM.
829
+ \param [in] IRQn Interrupt number
830
+ \param [in] vector Address of interrupt handler function
831
+ */
832
+ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
833
+ {
834
+ uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */
835
+ *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */
836
+ /* ARM Application Note 321 states that the M0 does not require the architectural barrier */
837
+ }
838
+
839
+
840
+ /**
841
+ \brief Get Interrupt Vector
842
+ \details Reads an interrupt vector from interrupt vector table.
843
+ The interrupt number can be positive to specify a device specific interrupt,
844
+ or negative to specify a processor exception.
845
+ \param [in] IRQn Interrupt number.
846
+ \return Address of interrupt handler function
847
+ */
848
+ __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
849
+ {
850
+ uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */
851
+ return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */
852
+ }
853
+
854
+
855
+ /**
856
+ \brief System Reset
857
+ \details Initiates a system reset request to reset the MCU.
858
+ */
859
+ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
860
+ {
861
+ __DSB(); /* Ensure all outstanding memory accesses included
862
+ buffered write are completed before reset */
863
+ SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
864
+ SCB_AIRCR_SYSRESETREQ_Msk);
865
+ __DSB(); /* Ensure completion of memory access */
866
+
867
+ for(;;) /* wait until reset */
868
+ {
869
+ __NOP();
870
+ }
871
+ }
872
+
873
+ /*@} end of CMSIS_Core_NVICFunctions */
874
+
875
+
876
+ /* ########################## FPU functions #################################### */
877
+ /**
878
+ \ingroup CMSIS_Core_FunctionInterface
879
+ \defgroup CMSIS_Core_FpuFunctions FPU Functions
880
+ \brief Function that provides FPU type.
881
+ @{
882
+ */
883
+
884
+ /**
885
+ \brief get FPU type
886
+ \details returns the FPU type
887
+ \returns
888
+ - \b 0: No FPU
889
+ - \b 1: Single precision FPU
890
+ - \b 2: Double + Single precision FPU
891
+ */
892
+ __STATIC_INLINE uint32_t SCB_GetFPUType(void)
893
+ {
894
+ return 0U; /* No FPU */
895
+ }
896
+
897
+
898
+ /*@} end of CMSIS_Core_FpuFunctions */
899
+
900
+
901
+
902
+ /* ################################## SysTick function ############################################ */
903
+ /**
904
+ \ingroup CMSIS_Core_FunctionInterface
905
+ \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
906
+ \brief Functions that configure the System.
907
+ @{
908
+ */
909
+
910
+ #if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
911
+
912
+ /**
913
+ \brief System Tick Configuration
914
+ \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
915
+ Counter is in free running mode to generate periodic interrupts.
916
+ \param [in] ticks Number of ticks between two interrupts.
917
+ \return 0 Function succeeded.
918
+ \return 1 Function failed.
919
+ \note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
920
+ function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
921
+ must contain a vendor-specific implementation of this function.
922
+ */
923
+ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
924
+ {
925
+ if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
926
+ {
927
+ return (1UL); /* Reload value impossible */
928
+ }
929
+
930
+ SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
931
+ NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
932
+ SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
933
+ SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
934
+ SysTick_CTRL_TICKINT_Msk |
935
+ SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
936
+ return (0UL); /* Function successful */
937
+ }
938
+
939
+ #endif
940
+
941
+ /*@} end of CMSIS_Core_SysTickFunctions */
942
+
943
+
944
+
945
+
946
+ #ifdef __cplusplus
947
+ }
948
+ #endif
949
+
950
+ #endif /* __CORE_CM0_H_DEPENDANT */
951
+
952
+ #endif /* __CMSIS_GENERIC */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm0plus.h ADDED
@@ -0,0 +1,1087 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file core_cm0plus.h
3
+ * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File
4
+ * @version V5.0.9
5
+ * @date 21. August 2019
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef __CORE_CM0PLUS_H_GENERIC
32
+ #define __CORE_CM0PLUS_H_GENERIC
33
+
34
+ #include <stdint.h>
35
+
36
+ #ifdef __cplusplus
37
+ extern "C" {
38
+ #endif
39
+
40
+ /**
41
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
42
+ CMSIS violates the following MISRA-C:2004 rules:
43
+
44
+ \li Required Rule 8.5, object/function definition in header file.<br>
45
+ Function definitions in header files are used to allow 'inlining'.
46
+
47
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
48
+ Unions are used for effective representation of core registers.
49
+
50
+ \li Advisory Rule 19.7, Function-like macro defined.<br>
51
+ Function-like macros are used to allow more efficient code.
52
+ */
53
+
54
+
55
+ /*******************************************************************************
56
+ * CMSIS definitions
57
+ ******************************************************************************/
58
+ /**
59
+ \ingroup Cortex-M0+
60
+ @{
61
+ */
62
+
63
+ #include "cmsis_version.h"
64
+
65
+ /* CMSIS CM0+ definitions */
66
+ #define __CM0PLUS_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */
67
+ #define __CM0PLUS_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */
68
+ #define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16U) | \
69
+ __CM0PLUS_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */
70
+
71
+ #define __CORTEX_M (0U) /*!< Cortex-M Core */
72
+
73
+ /** __FPU_USED indicates whether an FPU is used or not.
74
+ This core does not support an FPU at all
75
+ */
76
+ #define __FPU_USED 0U
77
+
78
+ #if defined ( __CC_ARM )
79
+ #if defined __TARGET_FPU_VFP
80
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
81
+ #endif
82
+
83
+ #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
84
+ #if defined __ARM_FP
85
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
86
+ #endif
87
+
88
+ #elif defined ( __GNUC__ )
89
+ #if defined (__VFP_FP__) && !defined(__SOFTFP__)
90
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
91
+ #endif
92
+
93
+ #elif defined ( __ICCARM__ )
94
+ #if defined __ARMVFP__
95
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
96
+ #endif
97
+
98
+ #elif defined ( __TI_ARM__ )
99
+ #if defined __TI_VFP_SUPPORT__
100
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
101
+ #endif
102
+
103
+ #elif defined ( __TASKING__ )
104
+ #if defined __FPU_VFP__
105
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
106
+ #endif
107
+
108
+ #elif defined ( __CSMC__ )
109
+ #if ( __CSMC__ & 0x400U)
110
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
111
+ #endif
112
+
113
+ #endif
114
+
115
+ #include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" /* CMSIS compiler specific defines */
116
+
117
+
118
+ #ifdef __cplusplus
119
+ }
120
+ #endif
121
+
122
+ #endif /* __CORE_CM0PLUS_H_GENERIC */
123
+
124
+ #ifndef __CMSIS_GENERIC
125
+
126
+ #ifndef __CORE_CM0PLUS_H_DEPENDANT
127
+ #define __CORE_CM0PLUS_H_DEPENDANT
128
+
129
+ #ifdef __cplusplus
130
+ extern "C" {
131
+ #endif
132
+
133
+ /* check device defines and use defaults */
134
+ #if defined __CHECK_DEVICE_DEFINES
135
+ #ifndef __CM0PLUS_REV
136
+ #define __CM0PLUS_REV 0x0000U
137
+ #warning "__CM0PLUS_REV not defined in device header file; using default!"
138
+ #endif
139
+
140
+ #ifndef __MPU_PRESENT
141
+ #define __MPU_PRESENT 0U
142
+ #warning "__MPU_PRESENT not defined in device header file; using default!"
143
+ #endif
144
+
145
+ #ifndef __VTOR_PRESENT
146
+ #define __VTOR_PRESENT 0U
147
+ #warning "__VTOR_PRESENT not defined in device header file; using default!"
148
+ #endif
149
+
150
+ #ifndef __NVIC_PRIO_BITS
151
+ #define __NVIC_PRIO_BITS 2U
152
+ #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
153
+ #endif
154
+
155
+ #ifndef __Vendor_SysTickConfig
156
+ #define __Vendor_SysTickConfig 0U
157
+ #warning "__Vendor_SysTickConfig not defined in device header file; using default!"
158
+ #endif
159
+ #endif
160
+
161
+ /* IO definitions (access restrictions to peripheral registers) */
162
+ /**
163
+ \defgroup CMSIS_glob_defs CMSIS Global Defines
164
+
165
+ <strong>IO Type Qualifiers</strong> are used
166
+ \li to specify the access to peripheral variables.
167
+ \li for automatic generation of peripheral register debug information.
168
+ */
169
+ #ifdef __cplusplus
170
+ #define __I volatile /*!< Defines 'read only' permissions */
171
+ #else
172
+ #define __I volatile const /*!< Defines 'read only' permissions */
173
+ #endif
174
+ #define __O volatile /*!< Defines 'write only' permissions */
175
+ #define __IO volatile /*!< Defines 'read / write' permissions */
176
+
177
+ /* following defines should be used for structure members */
178
+ #define __IM volatile const /*! Defines 'read only' structure member permissions */
179
+ #define __OM volatile /*! Defines 'write only' structure member permissions */
180
+ #define __IOM volatile /*! Defines 'read / write' structure member permissions */
181
+
182
+ /*@} end of group Cortex-M0+ */
183
+
184
+
185
+
186
+ /*******************************************************************************
187
+ * Register Abstraction
188
+ Core Register contain:
189
+ - Core Register
190
+ - Core NVIC Register
191
+ - Core SCB Register
192
+ - Core SysTick Register
193
+ - Core MPU Register
194
+ ******************************************************************************/
195
+ /**
196
+ \defgroup CMSIS_core_register Defines and Type Definitions
197
+ \brief Type definitions and defines for Cortex-M processor based devices.
198
+ */
199
+
200
+ /**
201
+ \ingroup CMSIS_core_register
202
+ \defgroup CMSIS_CORE Status and Control Registers
203
+ \brief Core Register type definitions.
204
+ @{
205
+ */
206
+
207
+ /**
208
+ \brief Union type to access the Application Program Status Register (APSR).
209
+ */
210
+ typedef union
211
+ {
212
+ struct
213
+ {
214
+ uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */
215
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
216
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
217
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
218
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
219
+ } b; /*!< Structure used for bit access */
220
+ uint32_t w; /*!< Type used for word access */
221
+ } APSR_Type;
222
+
223
+ /* APSR Register Definitions */
224
+ #define APSR_N_Pos 31U /*!< APSR: N Position */
225
+ #define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
226
+
227
+ #define APSR_Z_Pos 30U /*!< APSR: Z Position */
228
+ #define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
229
+
230
+ #define APSR_C_Pos 29U /*!< APSR: C Position */
231
+ #define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
232
+
233
+ #define APSR_V_Pos 28U /*!< APSR: V Position */
234
+ #define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
235
+
236
+
237
+ /**
238
+ \brief Union type to access the Interrupt Program Status Register (IPSR).
239
+ */
240
+ typedef union
241
+ {
242
+ struct
243
+ {
244
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
245
+ uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */
246
+ } b; /*!< Structure used for bit access */
247
+ uint32_t w; /*!< Type used for word access */
248
+ } IPSR_Type;
249
+
250
+ /* IPSR Register Definitions */
251
+ #define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
252
+ #define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
253
+
254
+
255
+ /**
256
+ \brief Union type to access the Special-Purpose Program Status Registers (xPSR).
257
+ */
258
+ typedef union
259
+ {
260
+ struct
261
+ {
262
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
263
+ uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */
264
+ uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */
265
+ uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */
266
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
267
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
268
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
269
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
270
+ } b; /*!< Structure used for bit access */
271
+ uint32_t w; /*!< Type used for word access */
272
+ } xPSR_Type;
273
+
274
+ /* xPSR Register Definitions */
275
+ #define xPSR_N_Pos 31U /*!< xPSR: N Position */
276
+ #define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
277
+
278
+ #define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
279
+ #define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
280
+
281
+ #define xPSR_C_Pos 29U /*!< xPSR: C Position */
282
+ #define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
283
+
284
+ #define xPSR_V_Pos 28U /*!< xPSR: V Position */
285
+ #define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
286
+
287
+ #define xPSR_T_Pos 24U /*!< xPSR: T Position */
288
+ #define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
289
+
290
+ #define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
291
+ #define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
292
+
293
+
294
+ /**
295
+ \brief Union type to access the Control Registers (CONTROL).
296
+ */
297
+ typedef union
298
+ {
299
+ struct
300
+ {
301
+ uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */
302
+ uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */
303
+ uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */
304
+ } b; /*!< Structure used for bit access */
305
+ uint32_t w; /*!< Type used for word access */
306
+ } CONTROL_Type;
307
+
308
+ /* CONTROL Register Definitions */
309
+ #define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
310
+ #define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
311
+
312
+ #define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */
313
+ #define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */
314
+
315
+ /*@} end of group CMSIS_CORE */
316
+
317
+
318
+ /**
319
+ \ingroup CMSIS_core_register
320
+ \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
321
+ \brief Type definitions for the NVIC Registers
322
+ @{
323
+ */
324
+
325
+ /**
326
+ \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
327
+ */
328
+ typedef struct
329
+ {
330
+ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
331
+ uint32_t RESERVED0[31U];
332
+ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
333
+ uint32_t RESERVED1[31U];
334
+ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
335
+ uint32_t RESERVED2[31U];
336
+ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
337
+ uint32_t RESERVED3[31U];
338
+ uint32_t RESERVED4[64U];
339
+ __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
340
+ } NVIC_Type;
341
+
342
+ /*@} end of group CMSIS_NVIC */
343
+
344
+
345
+ /**
346
+ \ingroup CMSIS_core_register
347
+ \defgroup CMSIS_SCB System Control Block (SCB)
348
+ \brief Type definitions for the System Control Block Registers
349
+ @{
350
+ */
351
+
352
+ /**
353
+ \brief Structure type to access the System Control Block (SCB).
354
+ */
355
+ typedef struct
356
+ {
357
+ __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
358
+ __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
359
+ #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
360
+ __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */
361
+ #else
362
+ uint32_t RESERVED0;
363
+ #endif
364
+ __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
365
+ __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
366
+ __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
367
+ uint32_t RESERVED1;
368
+ __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
369
+ __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
370
+ } SCB_Type;
371
+
372
+ /* SCB CPUID Register Definitions */
373
+ #define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
374
+ #define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
375
+
376
+ #define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
377
+ #define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
378
+
379
+ #define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
380
+ #define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
381
+
382
+ #define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
383
+ #define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
384
+
385
+ #define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
386
+ #define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
387
+
388
+ /* SCB Interrupt Control State Register Definitions */
389
+ #define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
390
+ #define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
391
+
392
+ #define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
393
+ #define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
394
+
395
+ #define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
396
+ #define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
397
+
398
+ #define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
399
+ #define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
400
+
401
+ #define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
402
+ #define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
403
+
404
+ #define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
405
+ #define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
406
+
407
+ #define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
408
+ #define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
409
+
410
+ #define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
411
+ #define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
412
+
413
+ #define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
414
+ #define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
415
+
416
+ #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
417
+ /* SCB Interrupt Control State Register Definitions */
418
+ #define SCB_VTOR_TBLOFF_Pos 8U /*!< SCB VTOR: TBLOFF Position */
419
+ #define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */
420
+ #endif
421
+
422
+ /* SCB Application Interrupt and Reset Control Register Definitions */
423
+ #define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
424
+ #define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
425
+
426
+ #define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
427
+ #define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
428
+
429
+ #define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
430
+ #define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
431
+
432
+ #define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
433
+ #define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
434
+
435
+ #define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
436
+ #define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
437
+
438
+ /* SCB System Control Register Definitions */
439
+ #define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
440
+ #define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
441
+
442
+ #define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
443
+ #define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
444
+
445
+ #define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
446
+ #define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
447
+
448
+ /* SCB Configuration Control Register Definitions */
449
+ #define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
450
+ #define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
451
+
452
+ #define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
453
+ #define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
454
+
455
+ /* SCB System Handler Control and State Register Definitions */
456
+ #define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
457
+ #define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
458
+
459
+ /*@} end of group CMSIS_SCB */
460
+
461
+
462
+ /**
463
+ \ingroup CMSIS_core_register
464
+ \defgroup CMSIS_SysTick System Tick Timer (SysTick)
465
+ \brief Type definitions for the System Timer Registers.
466
+ @{
467
+ */
468
+
469
+ /**
470
+ \brief Structure type to access the System Timer (SysTick).
471
+ */
472
+ typedef struct
473
+ {
474
+ __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
475
+ __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
476
+ __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
477
+ __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
478
+ } SysTick_Type;
479
+
480
+ /* SysTick Control / Status Register Definitions */
481
+ #define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
482
+ #define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
483
+
484
+ #define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
485
+ #define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
486
+
487
+ #define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
488
+ #define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
489
+
490
+ #define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
491
+ #define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
492
+
493
+ /* SysTick Reload Register Definitions */
494
+ #define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
495
+ #define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
496
+
497
+ /* SysTick Current Register Definitions */
498
+ #define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
499
+ #define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
500
+
501
+ /* SysTick Calibration Register Definitions */
502
+ #define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
503
+ #define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
504
+
505
+ #define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
506
+ #define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
507
+
508
+ #define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
509
+ #define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
510
+
511
+ /*@} end of group CMSIS_SysTick */
512
+
513
+ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
514
+ /**
515
+ \ingroup CMSIS_core_register
516
+ \defgroup CMSIS_MPU Memory Protection Unit (MPU)
517
+ \brief Type definitions for the Memory Protection Unit (MPU)
518
+ @{
519
+ */
520
+
521
+ /**
522
+ \brief Structure type to access the Memory Protection Unit (MPU).
523
+ */
524
+ typedef struct
525
+ {
526
+ __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */
527
+ __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */
528
+ __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */
529
+ __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */
530
+ __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */
531
+ } MPU_Type;
532
+
533
+ #define MPU_TYPE_RALIASES 1U
534
+
535
+ /* MPU Type Register Definitions */
536
+ #define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */
537
+ #define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */
538
+
539
+ #define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */
540
+ #define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */
541
+
542
+ #define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */
543
+ #define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */
544
+
545
+ /* MPU Control Register Definitions */
546
+ #define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */
547
+ #define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */
548
+
549
+ #define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */
550
+ #define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */
551
+
552
+ #define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */
553
+ #define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */
554
+
555
+ /* MPU Region Number Register Definitions */
556
+ #define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */
557
+ #define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */
558
+
559
+ /* MPU Region Base Address Register Definitions */
560
+ #define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */
561
+ #define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */
562
+
563
+ #define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */
564
+ #define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */
565
+
566
+ #define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */
567
+ #define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */
568
+
569
+ /* MPU Region Attribute and Size Register Definitions */
570
+ #define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */
571
+ #define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */
572
+
573
+ #define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */
574
+ #define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */
575
+
576
+ #define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */
577
+ #define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */
578
+
579
+ #define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */
580
+ #define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */
581
+
582
+ #define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */
583
+ #define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */
584
+
585
+ #define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */
586
+ #define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */
587
+
588
+ #define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */
589
+ #define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */
590
+
591
+ #define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */
592
+ #define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */
593
+
594
+ #define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */
595
+ #define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */
596
+
597
+ #define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */
598
+ #define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */
599
+
600
+ /*@} end of group CMSIS_MPU */
601
+ #endif
602
+
603
+
604
+ /**
605
+ \ingroup CMSIS_core_register
606
+ \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
607
+ \brief Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor.
608
+ Therefore they are not covered by the Cortex-M0+ header file.
609
+ @{
610
+ */
611
+ /*@} end of group CMSIS_CoreDebug */
612
+
613
+
614
+ /**
615
+ \ingroup CMSIS_core_register
616
+ \defgroup CMSIS_core_bitfield Core register bit field macros
617
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
618
+ @{
619
+ */
620
+
621
+ /**
622
+ \brief Mask and shift a bit field value for use in a register bit range.
623
+ \param[in] field Name of the register bit field.
624
+ \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type.
625
+ \return Masked and shifted value.
626
+ */
627
+ #define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
628
+
629
+ /**
630
+ \brief Mask and shift a register value to extract a bit filed value.
631
+ \param[in] field Name of the register bit field.
632
+ \param[in] value Value of register. This parameter is interpreted as an uint32_t type.
633
+ \return Masked and shifted bit field value.
634
+ */
635
+ #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
636
+
637
+ /*@} end of group CMSIS_core_bitfield */
638
+
639
+
640
+ /**
641
+ \ingroup CMSIS_core_register
642
+ \defgroup CMSIS_core_base Core Definitions
643
+ \brief Definitions for base addresses, unions, and structures.
644
+ @{
645
+ */
646
+
647
+ /* Memory mapping of Core Hardware */
648
+ #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
649
+ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
650
+ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
651
+ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
652
+
653
+ #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
654
+ #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
655
+ #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
656
+
657
+ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
658
+ #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */
659
+ #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */
660
+ #endif
661
+
662
+ /*@} */
663
+
664
+
665
+
666
+ /*******************************************************************************
667
+ * Hardware Abstraction Layer
668
+ Core Function Interface contains:
669
+ - Core NVIC Functions
670
+ - Core SysTick Functions
671
+ - Core Register Access Functions
672
+ ******************************************************************************/
673
+ /**
674
+ \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
675
+ */
676
+
677
+
678
+
679
+ /* ########################## NVIC functions #################################### */
680
+ /**
681
+ \ingroup CMSIS_Core_FunctionInterface
682
+ \defgroup CMSIS_Core_NVICFunctions NVIC Functions
683
+ \brief Functions that manage interrupts and exceptions via the NVIC.
684
+ @{
685
+ */
686
+
687
+ #ifdef CMSIS_NVIC_VIRTUAL
688
+ #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
689
+ #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
690
+ #endif
691
+ #include CMSIS_NVIC_VIRTUAL_HEADER_FILE
692
+ #else
693
+ #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping
694
+ #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping
695
+ #define NVIC_EnableIRQ __NVIC_EnableIRQ
696
+ #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ
697
+ #define NVIC_DisableIRQ __NVIC_DisableIRQ
698
+ #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ
699
+ #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ
700
+ #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ
701
+ /*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0+ */
702
+ #define NVIC_SetPriority __NVIC_SetPriority
703
+ #define NVIC_GetPriority __NVIC_GetPriority
704
+ #define NVIC_SystemReset __NVIC_SystemReset
705
+ #endif /* CMSIS_NVIC_VIRTUAL */
706
+
707
+ #ifdef CMSIS_VECTAB_VIRTUAL
708
+ #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
709
+ #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
710
+ #endif
711
+ #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
712
+ #else
713
+ #define NVIC_SetVector __NVIC_SetVector
714
+ #define NVIC_GetVector __NVIC_GetVector
715
+ #endif /* (CMSIS_VECTAB_VIRTUAL) */
716
+
717
+ #define NVIC_USER_IRQ_OFFSET 16
718
+
719
+
720
+ /* The following EXC_RETURN values are saved the LR on exception entry */
721
+ #define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */
722
+ #define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */
723
+ #define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */
724
+
725
+
726
+ /* Interrupt Priorities are WORD accessible only under Armv6-M */
727
+ /* The following MACROS handle generation of the register offset and byte masks */
728
+ #define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
729
+ #define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
730
+ #define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
731
+
732
+ #define __NVIC_SetPriorityGrouping(X) (void)(X)
733
+ #define __NVIC_GetPriorityGrouping() (0U)
734
+
735
+ /**
736
+ \brief Enable Interrupt
737
+ \details Enables a device specific interrupt in the NVIC interrupt controller.
738
+ \param [in] IRQn Device specific interrupt number.
739
+ \note IRQn must not be negative.
740
+ */
741
+ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
742
+ {
743
+ if ((int32_t)(IRQn) >= 0)
744
+ {
745
+ __COMPILER_BARRIER();
746
+ NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
747
+ __COMPILER_BARRIER();
748
+ }
749
+ }
750
+
751
+
752
+ /**
753
+ \brief Get Interrupt Enable status
754
+ \details Returns a device specific interrupt enable status from the NVIC interrupt controller.
755
+ \param [in] IRQn Device specific interrupt number.
756
+ \return 0 Interrupt is not enabled.
757
+ \return 1 Interrupt is enabled.
758
+ \note IRQn must not be negative.
759
+ */
760
+ __STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
761
+ {
762
+ if ((int32_t)(IRQn) >= 0)
763
+ {
764
+ return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
765
+ }
766
+ else
767
+ {
768
+ return(0U);
769
+ }
770
+ }
771
+
772
+
773
+ /**
774
+ \brief Disable Interrupt
775
+ \details Disables a device specific interrupt in the NVIC interrupt controller.
776
+ \param [in] IRQn Device specific interrupt number.
777
+ \note IRQn must not be negative.
778
+ */
779
+ __STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
780
+ {
781
+ if ((int32_t)(IRQn) >= 0)
782
+ {
783
+ NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
784
+ __DSB();
785
+ __ISB();
786
+ }
787
+ }
788
+
789
+
790
+ /**
791
+ \brief Get Pending Interrupt
792
+ \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
793
+ \param [in] IRQn Device specific interrupt number.
794
+ \return 0 Interrupt status is not pending.
795
+ \return 1 Interrupt status is pending.
796
+ \note IRQn must not be negative.
797
+ */
798
+ __STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
799
+ {
800
+ if ((int32_t)(IRQn) >= 0)
801
+ {
802
+ return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
803
+ }
804
+ else
805
+ {
806
+ return(0U);
807
+ }
808
+ }
809
+
810
+
811
+ /**
812
+ \brief Set Pending Interrupt
813
+ \details Sets the pending bit of a device specific interrupt in the NVIC pending register.
814
+ \param [in] IRQn Device specific interrupt number.
815
+ \note IRQn must not be negative.
816
+ */
817
+ __STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
818
+ {
819
+ if ((int32_t)(IRQn) >= 0)
820
+ {
821
+ NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
822
+ }
823
+ }
824
+
825
+
826
+ /**
827
+ \brief Clear Pending Interrupt
828
+ \details Clears the pending bit of a device specific interrupt in the NVIC pending register.
829
+ \param [in] IRQn Device specific interrupt number.
830
+ \note IRQn must not be negative.
831
+ */
832
+ __STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
833
+ {
834
+ if ((int32_t)(IRQn) >= 0)
835
+ {
836
+ NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
837
+ }
838
+ }
839
+
840
+
841
+ /**
842
+ \brief Set Interrupt Priority
843
+ \details Sets the priority of a device specific interrupt or a processor exception.
844
+ The interrupt number can be positive to specify a device specific interrupt,
845
+ or negative to specify a processor exception.
846
+ \param [in] IRQn Interrupt number.
847
+ \param [in] priority Priority to set.
848
+ \note The priority cannot be set for every processor exception.
849
+ */
850
+ __STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
851
+ {
852
+ if ((int32_t)(IRQn) >= 0)
853
+ {
854
+ NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
855
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
856
+ }
857
+ else
858
+ {
859
+ SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
860
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
861
+ }
862
+ }
863
+
864
+
865
+ /**
866
+ \brief Get Interrupt Priority
867
+ \details Reads the priority of a device specific interrupt or a processor exception.
868
+ The interrupt number can be positive to specify a device specific interrupt,
869
+ or negative to specify a processor exception.
870
+ \param [in] IRQn Interrupt number.
871
+ \return Interrupt Priority.
872
+ Value is aligned automatically to the implemented priority bits of the microcontroller.
873
+ */
874
+ __STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
875
+ {
876
+
877
+ if ((int32_t)(IRQn) >= 0)
878
+ {
879
+ return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
880
+ }
881
+ else
882
+ {
883
+ return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
884
+ }
885
+ }
886
+
887
+
888
+ /**
889
+ \brief Encode Priority
890
+ \details Encodes the priority for an interrupt with the given priority group,
891
+ preemptive priority value, and subpriority value.
892
+ In case of a conflict between priority grouping and available
893
+ priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
894
+ \param [in] PriorityGroup Used priority group.
895
+ \param [in] PreemptPriority Preemptive priority value (starting from 0).
896
+ \param [in] SubPriority Subpriority value (starting from 0).
897
+ \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
898
+ */
899
+ __STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
900
+ {
901
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
902
+ uint32_t PreemptPriorityBits;
903
+ uint32_t SubPriorityBits;
904
+
905
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
906
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
907
+
908
+ return (
909
+ ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
910
+ ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
911
+ );
912
+ }
913
+
914
+
915
+ /**
916
+ \brief Decode Priority
917
+ \details Decodes an interrupt priority value with a given priority group to
918
+ preemptive priority value and subpriority value.
919
+ In case of a conflict between priority grouping and available
920
+ priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
921
+ \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
922
+ \param [in] PriorityGroup Used priority group.
923
+ \param [out] pPreemptPriority Preemptive priority value (starting from 0).
924
+ \param [out] pSubPriority Subpriority value (starting from 0).
925
+ */
926
+ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
927
+ {
928
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
929
+ uint32_t PreemptPriorityBits;
930
+ uint32_t SubPriorityBits;
931
+
932
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
933
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
934
+
935
+ *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
936
+ *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
937
+ }
938
+
939
+
940
+ /**
941
+ \brief Set Interrupt Vector
942
+ \details Sets an interrupt vector in SRAM based interrupt vector table.
943
+ The interrupt number can be positive to specify a device specific interrupt,
944
+ or negative to specify a processor exception.
945
+ VTOR must been relocated to SRAM before.
946
+ If VTOR is not present address 0 must be mapped to SRAM.
947
+ \param [in] IRQn Interrupt number
948
+ \param [in] vector Address of interrupt handler function
949
+ */
950
+ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
951
+ {
952
+ #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
953
+ uint32_t *vectors = (uint32_t *)SCB->VTOR;
954
+ vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;
955
+ #else
956
+ uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */
957
+ *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */
958
+ #endif
959
+ /* ARM Application Note 321 states that the M0+ does not require the architectural barrier */
960
+ }
961
+
962
+
963
+ /**
964
+ \brief Get Interrupt Vector
965
+ \details Reads an interrupt vector from interrupt vector table.
966
+ The interrupt number can be positive to specify a device specific interrupt,
967
+ or negative to specify a processor exception.
968
+ \param [in] IRQn Interrupt number.
969
+ \return Address of interrupt handler function
970
+ */
971
+ __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
972
+ {
973
+ #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
974
+ uint32_t *vectors = (uint32_t *)SCB->VTOR;
975
+ return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];
976
+ #else
977
+ uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */
978
+ return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */
979
+ #endif
980
+ }
981
+
982
+
983
+ /**
984
+ \brief System Reset
985
+ \details Initiates a system reset request to reset the MCU.
986
+ */
987
+ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
988
+ {
989
+ __DSB(); /* Ensure all outstanding memory accesses included
990
+ buffered write are completed before reset */
991
+ SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
992
+ SCB_AIRCR_SYSRESETREQ_Msk);
993
+ __DSB(); /* Ensure completion of memory access */
994
+
995
+ for(;;) /* wait until reset */
996
+ {
997
+ __NOP();
998
+ }
999
+ }
1000
+
1001
+ /*@} end of CMSIS_Core_NVICFunctions */
1002
+
1003
+ /* ########################## MPU functions #################################### */
1004
+
1005
+ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
1006
+
1007
+ #include "mpu_armv7.h"
1008
+
1009
+ #endif
1010
+
1011
+ /* ########################## FPU functions #################################### */
1012
+ /**
1013
+ \ingroup CMSIS_Core_FunctionInterface
1014
+ \defgroup CMSIS_Core_FpuFunctions FPU Functions
1015
+ \brief Function that provides FPU type.
1016
+ @{
1017
+ */
1018
+
1019
+ /**
1020
+ \brief get FPU type
1021
+ \details returns the FPU type
1022
+ \returns
1023
+ - \b 0: No FPU
1024
+ - \b 1: Single precision FPU
1025
+ - \b 2: Double + Single precision FPU
1026
+ */
1027
+ __STATIC_INLINE uint32_t SCB_GetFPUType(void)
1028
+ {
1029
+ return 0U; /* No FPU */
1030
+ }
1031
+
1032
+
1033
+ /*@} end of CMSIS_Core_FpuFunctions */
1034
+
1035
+
1036
+
1037
+ /* ################################## SysTick function ############################################ */
1038
+ /**
1039
+ \ingroup CMSIS_Core_FunctionInterface
1040
+ \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
1041
+ \brief Functions that configure the System.
1042
+ @{
1043
+ */
1044
+
1045
+ #if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
1046
+
1047
+ /**
1048
+ \brief System Tick Configuration
1049
+ \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
1050
+ Counter is in free running mode to generate periodic interrupts.
1051
+ \param [in] ticks Number of ticks between two interrupts.
1052
+ \return 0 Function succeeded.
1053
+ \return 1 Function failed.
1054
+ \note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
1055
+ function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
1056
+ must contain a vendor-specific implementation of this function.
1057
+ */
1058
+ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
1059
+ {
1060
+ if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
1061
+ {
1062
+ return (1UL); /* Reload value impossible */
1063
+ }
1064
+
1065
+ SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
1066
+ NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
1067
+ SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
1068
+ SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
1069
+ SysTick_CTRL_TICKINT_Msk |
1070
+ SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
1071
+ return (0UL); /* Function successful */
1072
+ }
1073
+
1074
+ #endif
1075
+
1076
+ /*@} end of CMSIS_Core_SysTickFunctions */
1077
+
1078
+
1079
+
1080
+
1081
+ #ifdef __cplusplus
1082
+ }
1083
+ #endif
1084
+
1085
+ #endif /* __CORE_CM0PLUS_H_DEPENDANT */
1086
+
1087
+ #endif /* __CMSIS_GENERIC */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm1.h ADDED
@@ -0,0 +1,979 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file core_cm1.h
3
+ * @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File
4
+ * @version V1.0.1
5
+ * @date 12. November 2018
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef __CORE_CM1_H_GENERIC
32
+ #define __CORE_CM1_H_GENERIC
33
+
34
+ #include <stdint.h>
35
+
36
+ #ifdef __cplusplus
37
+ extern "C" {
38
+ #endif
39
+
40
+ /**
41
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
42
+ CMSIS violates the following MISRA-C:2004 rules:
43
+
44
+ \li Required Rule 8.5, object/function definition in header file.<br>
45
+ Function definitions in header files are used to allow 'inlining'.
46
+
47
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
48
+ Unions are used for effective representation of core registers.
49
+
50
+ \li Advisory Rule 19.7, Function-like macro defined.<br>
51
+ Function-like macros are used to allow more efficient code.
52
+ */
53
+
54
+
55
+ /*******************************************************************************
56
+ * CMSIS definitions
57
+ ******************************************************************************/
58
+ /**
59
+ \ingroup Cortex_M1
60
+ @{
61
+ */
62
+
63
+ #include "cmsis_version.h"
64
+
65
+ /* CMSIS CM1 definitions */
66
+ #define __CM1_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */
67
+ #define __CM1_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */
68
+ #define __CM1_CMSIS_VERSION ((__CM1_CMSIS_VERSION_MAIN << 16U) | \
69
+ __CM1_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */
70
+
71
+ #define __CORTEX_M (1U) /*!< Cortex-M Core */
72
+
73
+ /** __FPU_USED indicates whether an FPU is used or not.
74
+ This core does not support an FPU at all
75
+ */
76
+ #define __FPU_USED 0U
77
+
78
+ #if defined ( __CC_ARM )
79
+ #if defined __TARGET_FPU_VFP
80
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
81
+ #endif
82
+
83
+ #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
84
+ #if defined __ARM_FP
85
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
86
+ #endif
87
+
88
+ #elif defined ( __GNUC__ )
89
+ #if defined (__VFP_FP__) && !defined(__SOFTFP__)
90
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
91
+ #endif
92
+
93
+ #elif defined ( __ICCARM__ )
94
+ #if defined __ARMVFP__
95
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
96
+ #endif
97
+
98
+ #elif defined ( __TI_ARM__ )
99
+ #if defined __TI_VFP_SUPPORT__
100
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
101
+ #endif
102
+
103
+ #elif defined ( __TASKING__ )
104
+ #if defined __FPU_VFP__
105
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
106
+ #endif
107
+
108
+ #elif defined ( __CSMC__ )
109
+ #if ( __CSMC__ & 0x400U)
110
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
111
+ #endif
112
+
113
+ #endif
114
+
115
+ #include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" /* CMSIS compiler specific defines */
116
+
117
+
118
+ #ifdef __cplusplus
119
+ }
120
+ #endif
121
+
122
+ #endif /* __CORE_CM1_H_GENERIC */
123
+
124
+ #ifndef __CMSIS_GENERIC
125
+
126
+ #ifndef __CORE_CM1_H_DEPENDANT
127
+ #define __CORE_CM1_H_DEPENDANT
128
+
129
+ #ifdef __cplusplus
130
+ extern "C" {
131
+ #endif
132
+
133
+ /* check device defines and use defaults */
134
+ #if defined __CHECK_DEVICE_DEFINES
135
+ #ifndef __CM1_REV
136
+ #define __CM1_REV 0x0100U
137
+ #warning "__CM1_REV not defined in device header file; using default!"
138
+ #endif
139
+
140
+ #ifndef __NVIC_PRIO_BITS
141
+ #define __NVIC_PRIO_BITS 2U
142
+ #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
143
+ #endif
144
+
145
+ #ifndef __Vendor_SysTickConfig
146
+ #define __Vendor_SysTickConfig 0U
147
+ #warning "__Vendor_SysTickConfig not defined in device header file; using default!"
148
+ #endif
149
+ #endif
150
+
151
+ /* IO definitions (access restrictions to peripheral registers) */
152
+ /**
153
+ \defgroup CMSIS_glob_defs CMSIS Global Defines
154
+
155
+ <strong>IO Type Qualifiers</strong> are used
156
+ \li to specify the access to peripheral variables.
157
+ \li for automatic generation of peripheral register debug information.
158
+ */
159
+ #ifdef __cplusplus
160
+ #define __I volatile /*!< Defines 'read only' permissions */
161
+ #else
162
+ #define __I volatile const /*!< Defines 'read only' permissions */
163
+ #endif
164
+ #define __O volatile /*!< Defines 'write only' permissions */
165
+ #define __IO volatile /*!< Defines 'read / write' permissions */
166
+
167
+ /* following defines should be used for structure members */
168
+ #define __IM volatile const /*! Defines 'read only' structure member permissions */
169
+ #define __OM volatile /*! Defines 'write only' structure member permissions */
170
+ #define __IOM volatile /*! Defines 'read / write' structure member permissions */
171
+
172
+ /*@} end of group Cortex_M1 */
173
+
174
+
175
+
176
+ /*******************************************************************************
177
+ * Register Abstraction
178
+ Core Register contain:
179
+ - Core Register
180
+ - Core NVIC Register
181
+ - Core SCB Register
182
+ - Core SysTick Register
183
+ ******************************************************************************/
184
+ /**
185
+ \defgroup CMSIS_core_register Defines and Type Definitions
186
+ \brief Type definitions and defines for Cortex-M processor based devices.
187
+ */
188
+
189
+ /**
190
+ \ingroup CMSIS_core_register
191
+ \defgroup CMSIS_CORE Status and Control Registers
192
+ \brief Core Register type definitions.
193
+ @{
194
+ */
195
+
196
+ /**
197
+ \brief Union type to access the Application Program Status Register (APSR).
198
+ */
199
+ typedef union
200
+ {
201
+ struct
202
+ {
203
+ uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */
204
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
205
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
206
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
207
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
208
+ } b; /*!< Structure used for bit access */
209
+ uint32_t w; /*!< Type used for word access */
210
+ } APSR_Type;
211
+
212
+ /* APSR Register Definitions */
213
+ #define APSR_N_Pos 31U /*!< APSR: N Position */
214
+ #define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
215
+
216
+ #define APSR_Z_Pos 30U /*!< APSR: Z Position */
217
+ #define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
218
+
219
+ #define APSR_C_Pos 29U /*!< APSR: C Position */
220
+ #define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
221
+
222
+ #define APSR_V_Pos 28U /*!< APSR: V Position */
223
+ #define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
224
+
225
+
226
+ /**
227
+ \brief Union type to access the Interrupt Program Status Register (IPSR).
228
+ */
229
+ typedef union
230
+ {
231
+ struct
232
+ {
233
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
234
+ uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */
235
+ } b; /*!< Structure used for bit access */
236
+ uint32_t w; /*!< Type used for word access */
237
+ } IPSR_Type;
238
+
239
+ /* IPSR Register Definitions */
240
+ #define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
241
+ #define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
242
+
243
+
244
+ /**
245
+ \brief Union type to access the Special-Purpose Program Status Registers (xPSR).
246
+ */
247
+ typedef union
248
+ {
249
+ struct
250
+ {
251
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
252
+ uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */
253
+ uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */
254
+ uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */
255
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
256
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
257
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
258
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
259
+ } b; /*!< Structure used for bit access */
260
+ uint32_t w; /*!< Type used for word access */
261
+ } xPSR_Type;
262
+
263
+ /* xPSR Register Definitions */
264
+ #define xPSR_N_Pos 31U /*!< xPSR: N Position */
265
+ #define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
266
+
267
+ #define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
268
+ #define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
269
+
270
+ #define xPSR_C_Pos 29U /*!< xPSR: C Position */
271
+ #define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
272
+
273
+ #define xPSR_V_Pos 28U /*!< xPSR: V Position */
274
+ #define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
275
+
276
+ #define xPSR_T_Pos 24U /*!< xPSR: T Position */
277
+ #define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
278
+
279
+ #define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
280
+ #define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
281
+
282
+
283
+ /**
284
+ \brief Union type to access the Control Registers (CONTROL).
285
+ */
286
+ typedef union
287
+ {
288
+ struct
289
+ {
290
+ uint32_t _reserved0:1; /*!< bit: 0 Reserved */
291
+ uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */
292
+ uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */
293
+ } b; /*!< Structure used for bit access */
294
+ uint32_t w; /*!< Type used for word access */
295
+ } CONTROL_Type;
296
+
297
+ /* CONTROL Register Definitions */
298
+ #define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
299
+ #define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
300
+
301
+ /*@} end of group CMSIS_CORE */
302
+
303
+
304
+ /**
305
+ \ingroup CMSIS_core_register
306
+ \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
307
+ \brief Type definitions for the NVIC Registers
308
+ @{
309
+ */
310
+
311
+ /**
312
+ \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
313
+ */
314
+ typedef struct
315
+ {
316
+ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
317
+ uint32_t RESERVED0[31U];
318
+ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
319
+ uint32_t RSERVED1[31U];
320
+ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
321
+ uint32_t RESERVED2[31U];
322
+ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
323
+ uint32_t RESERVED3[31U];
324
+ uint32_t RESERVED4[64U];
325
+ __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
326
+ } NVIC_Type;
327
+
328
+ /*@} end of group CMSIS_NVIC */
329
+
330
+
331
+ /**
332
+ \ingroup CMSIS_core_register
333
+ \defgroup CMSIS_SCB System Control Block (SCB)
334
+ \brief Type definitions for the System Control Block Registers
335
+ @{
336
+ */
337
+
338
+ /**
339
+ \brief Structure type to access the System Control Block (SCB).
340
+ */
341
+ typedef struct
342
+ {
343
+ __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
344
+ __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
345
+ uint32_t RESERVED0;
346
+ __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
347
+ __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
348
+ __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
349
+ uint32_t RESERVED1;
350
+ __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
351
+ __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
352
+ } SCB_Type;
353
+
354
+ /* SCB CPUID Register Definitions */
355
+ #define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
356
+ #define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
357
+
358
+ #define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
359
+ #define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
360
+
361
+ #define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
362
+ #define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
363
+
364
+ #define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
365
+ #define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
366
+
367
+ #define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
368
+ #define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
369
+
370
+ /* SCB Interrupt Control State Register Definitions */
371
+ #define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
372
+ #define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
373
+
374
+ #define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
375
+ #define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
376
+
377
+ #define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
378
+ #define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
379
+
380
+ #define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
381
+ #define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
382
+
383
+ #define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
384
+ #define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
385
+
386
+ #define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
387
+ #define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
388
+
389
+ #define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
390
+ #define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
391
+
392
+ #define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
393
+ #define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
394
+
395
+ #define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
396
+ #define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
397
+
398
+ /* SCB Application Interrupt and Reset Control Register Definitions */
399
+ #define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
400
+ #define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
401
+
402
+ #define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
403
+ #define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
404
+
405
+ #define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
406
+ #define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
407
+
408
+ #define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
409
+ #define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
410
+
411
+ #define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
412
+ #define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
413
+
414
+ /* SCB System Control Register Definitions */
415
+ #define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
416
+ #define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
417
+
418
+ #define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
419
+ #define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
420
+
421
+ #define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
422
+ #define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
423
+
424
+ /* SCB Configuration Control Register Definitions */
425
+ #define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
426
+ #define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
427
+
428
+ #define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
429
+ #define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
430
+
431
+ /* SCB System Handler Control and State Register Definitions */
432
+ #define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
433
+ #define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
434
+
435
+ /*@} end of group CMSIS_SCB */
436
+
437
+
438
+ /**
439
+ \ingroup CMSIS_core_register
440
+ \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)
441
+ \brief Type definitions for the System Control and ID Register not in the SCB
442
+ @{
443
+ */
444
+
445
+ /**
446
+ \brief Structure type to access the System Control and ID Register not in the SCB.
447
+ */
448
+ typedef struct
449
+ {
450
+ uint32_t RESERVED0[2U];
451
+ __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */
452
+ } SCnSCB_Type;
453
+
454
+ /* Auxiliary Control Register Definitions */
455
+ #define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */
456
+ #define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */
457
+
458
+ #define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */
459
+ #define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */
460
+
461
+ /*@} end of group CMSIS_SCnotSCB */
462
+
463
+
464
+ /**
465
+ \ingroup CMSIS_core_register
466
+ \defgroup CMSIS_SysTick System Tick Timer (SysTick)
467
+ \brief Type definitions for the System Timer Registers.
468
+ @{
469
+ */
470
+
471
+ /**
472
+ \brief Structure type to access the System Timer (SysTick).
473
+ */
474
+ typedef struct
475
+ {
476
+ __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
477
+ __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
478
+ __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
479
+ __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
480
+ } SysTick_Type;
481
+
482
+ /* SysTick Control / Status Register Definitions */
483
+ #define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
484
+ #define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
485
+
486
+ #define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
487
+ #define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
488
+
489
+ #define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
490
+ #define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
491
+
492
+ #define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
493
+ #define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
494
+
495
+ /* SysTick Reload Register Definitions */
496
+ #define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
497
+ #define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
498
+
499
+ /* SysTick Current Register Definitions */
500
+ #define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
501
+ #define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
502
+
503
+ /* SysTick Calibration Register Definitions */
504
+ #define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
505
+ #define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
506
+
507
+ #define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
508
+ #define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
509
+
510
+ #define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
511
+ #define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
512
+
513
+ /*@} end of group CMSIS_SysTick */
514
+
515
+
516
+ /**
517
+ \ingroup CMSIS_core_register
518
+ \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
519
+ \brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor.
520
+ Therefore they are not covered by the Cortex-M1 header file.
521
+ @{
522
+ */
523
+ /*@} end of group CMSIS_CoreDebug */
524
+
525
+
526
+ /**
527
+ \ingroup CMSIS_core_register
528
+ \defgroup CMSIS_core_bitfield Core register bit field macros
529
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
530
+ @{
531
+ */
532
+
533
+ /**
534
+ \brief Mask and shift a bit field value for use in a register bit range.
535
+ \param[in] field Name of the register bit field.
536
+ \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type.
537
+ \return Masked and shifted value.
538
+ */
539
+ #define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
540
+
541
+ /**
542
+ \brief Mask and shift a register value to extract a bit filed value.
543
+ \param[in] field Name of the register bit field.
544
+ \param[in] value Value of register. This parameter is interpreted as an uint32_t type.
545
+ \return Masked and shifted bit field value.
546
+ */
547
+ #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
548
+
549
+ /*@} end of group CMSIS_core_bitfield */
550
+
551
+
552
+ /**
553
+ \ingroup CMSIS_core_register
554
+ \defgroup CMSIS_core_base Core Definitions
555
+ \brief Definitions for base addresses, unions, and structures.
556
+ @{
557
+ */
558
+
559
+ /* Memory mapping of Core Hardware */
560
+ #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
561
+ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
562
+ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
563
+ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
564
+
565
+ #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */
566
+ #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
567
+ #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
568
+ #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
569
+
570
+
571
+ /*@} */
572
+
573
+
574
+
575
+ /*******************************************************************************
576
+ * Hardware Abstraction Layer
577
+ Core Function Interface contains:
578
+ - Core NVIC Functions
579
+ - Core SysTick Functions
580
+ - Core Register Access Functions
581
+ ******************************************************************************/
582
+ /**
583
+ \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
584
+ */
585
+
586
+
587
+
588
+ /* ########################## NVIC functions #################################### */
589
+ /**
590
+ \ingroup CMSIS_Core_FunctionInterface
591
+ \defgroup CMSIS_Core_NVICFunctions NVIC Functions
592
+ \brief Functions that manage interrupts and exceptions via the NVIC.
593
+ @{
594
+ */
595
+
596
+ #ifdef CMSIS_NVIC_VIRTUAL
597
+ #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
598
+ #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
599
+ #endif
600
+ #include CMSIS_NVIC_VIRTUAL_HEADER_FILE
601
+ #else
602
+ #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping
603
+ #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping
604
+ #define NVIC_EnableIRQ __NVIC_EnableIRQ
605
+ #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ
606
+ #define NVIC_DisableIRQ __NVIC_DisableIRQ
607
+ #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ
608
+ #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ
609
+ #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ
610
+ /*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */
611
+ #define NVIC_SetPriority __NVIC_SetPriority
612
+ #define NVIC_GetPriority __NVIC_GetPriority
613
+ #define NVIC_SystemReset __NVIC_SystemReset
614
+ #endif /* CMSIS_NVIC_VIRTUAL */
615
+
616
+ #ifdef CMSIS_VECTAB_VIRTUAL
617
+ #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
618
+ #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
619
+ #endif
620
+ #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
621
+ #else
622
+ #define NVIC_SetVector __NVIC_SetVector
623
+ #define NVIC_GetVector __NVIC_GetVector
624
+ #endif /* (CMSIS_VECTAB_VIRTUAL) */
625
+
626
+ #define NVIC_USER_IRQ_OFFSET 16
627
+
628
+
629
+ /* The following EXC_RETURN values are saved the LR on exception entry */
630
+ #define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */
631
+ #define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */
632
+ #define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */
633
+
634
+
635
+ /* Interrupt Priorities are WORD accessible only under Armv6-M */
636
+ /* The following MACROS handle generation of the register offset and byte masks */
637
+ #define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
638
+ #define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
639
+ #define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
640
+
641
+ #define __NVIC_SetPriorityGrouping(X) (void)(X)
642
+ #define __NVIC_GetPriorityGrouping() (0U)
643
+
644
+ /**
645
+ \brief Enable Interrupt
646
+ \details Enables a device specific interrupt in the NVIC interrupt controller.
647
+ \param [in] IRQn Device specific interrupt number.
648
+ \note IRQn must not be negative.
649
+ */
650
+ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
651
+ {
652
+ if ((int32_t)(IRQn) >= 0)
653
+ {
654
+ __COMPILER_BARRIER();
655
+ NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
656
+ __COMPILER_BARRIER();
657
+ }
658
+ }
659
+
660
+
661
+ /**
662
+ \brief Get Interrupt Enable status
663
+ \details Returns a device specific interrupt enable status from the NVIC interrupt controller.
664
+ \param [in] IRQn Device specific interrupt number.
665
+ \return 0 Interrupt is not enabled.
666
+ \return 1 Interrupt is enabled.
667
+ \note IRQn must not be negative.
668
+ */
669
+ __STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
670
+ {
671
+ if ((int32_t)(IRQn) >= 0)
672
+ {
673
+ return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
674
+ }
675
+ else
676
+ {
677
+ return(0U);
678
+ }
679
+ }
680
+
681
+
682
+ /**
683
+ \brief Disable Interrupt
684
+ \details Disables a device specific interrupt in the NVIC interrupt controller.
685
+ \param [in] IRQn Device specific interrupt number.
686
+ \note IRQn must not be negative.
687
+ */
688
+ __STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
689
+ {
690
+ if ((int32_t)(IRQn) >= 0)
691
+ {
692
+ NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
693
+ __DSB();
694
+ __ISB();
695
+ }
696
+ }
697
+
698
+
699
+ /**
700
+ \brief Get Pending Interrupt
701
+ \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
702
+ \param [in] IRQn Device specific interrupt number.
703
+ \return 0 Interrupt status is not pending.
704
+ \return 1 Interrupt status is pending.
705
+ \note IRQn must not be negative.
706
+ */
707
+ __STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
708
+ {
709
+ if ((int32_t)(IRQn) >= 0)
710
+ {
711
+ return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
712
+ }
713
+ else
714
+ {
715
+ return(0U);
716
+ }
717
+ }
718
+
719
+
720
+ /**
721
+ \brief Set Pending Interrupt
722
+ \details Sets the pending bit of a device specific interrupt in the NVIC pending register.
723
+ \param [in] IRQn Device specific interrupt number.
724
+ \note IRQn must not be negative.
725
+ */
726
+ __STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
727
+ {
728
+ if ((int32_t)(IRQn) >= 0)
729
+ {
730
+ NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
731
+ }
732
+ }
733
+
734
+
735
+ /**
736
+ \brief Clear Pending Interrupt
737
+ \details Clears the pending bit of a device specific interrupt in the NVIC pending register.
738
+ \param [in] IRQn Device specific interrupt number.
739
+ \note IRQn must not be negative.
740
+ */
741
+ __STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
742
+ {
743
+ if ((int32_t)(IRQn) >= 0)
744
+ {
745
+ NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
746
+ }
747
+ }
748
+
749
+
750
+ /**
751
+ \brief Set Interrupt Priority
752
+ \details Sets the priority of a device specific interrupt or a processor exception.
753
+ The interrupt number can be positive to specify a device specific interrupt,
754
+ or negative to specify a processor exception.
755
+ \param [in] IRQn Interrupt number.
756
+ \param [in] priority Priority to set.
757
+ \note The priority cannot be set for every processor exception.
758
+ */
759
+ __STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
760
+ {
761
+ if ((int32_t)(IRQn) >= 0)
762
+ {
763
+ NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
764
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
765
+ }
766
+ else
767
+ {
768
+ SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
769
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
770
+ }
771
+ }
772
+
773
+
774
+ /**
775
+ \brief Get Interrupt Priority
776
+ \details Reads the priority of a device specific interrupt or a processor exception.
777
+ The interrupt number can be positive to specify a device specific interrupt,
778
+ or negative to specify a processor exception.
779
+ \param [in] IRQn Interrupt number.
780
+ \return Interrupt Priority.
781
+ Value is aligned automatically to the implemented priority bits of the microcontroller.
782
+ */
783
+ __STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
784
+ {
785
+
786
+ if ((int32_t)(IRQn) >= 0)
787
+ {
788
+ return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
789
+ }
790
+ else
791
+ {
792
+ return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
793
+ }
794
+ }
795
+
796
+
797
+ /**
798
+ \brief Encode Priority
799
+ \details Encodes the priority for an interrupt with the given priority group,
800
+ preemptive priority value, and subpriority value.
801
+ In case of a conflict between priority grouping and available
802
+ priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
803
+ \param [in] PriorityGroup Used priority group.
804
+ \param [in] PreemptPriority Preemptive priority value (starting from 0).
805
+ \param [in] SubPriority Subpriority value (starting from 0).
806
+ \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
807
+ */
808
+ __STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
809
+ {
810
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
811
+ uint32_t PreemptPriorityBits;
812
+ uint32_t SubPriorityBits;
813
+
814
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
815
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
816
+
817
+ return (
818
+ ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
819
+ ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
820
+ );
821
+ }
822
+
823
+
824
+ /**
825
+ \brief Decode Priority
826
+ \details Decodes an interrupt priority value with a given priority group to
827
+ preemptive priority value and subpriority value.
828
+ In case of a conflict between priority grouping and available
829
+ priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
830
+ \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
831
+ \param [in] PriorityGroup Used priority group.
832
+ \param [out] pPreemptPriority Preemptive priority value (starting from 0).
833
+ \param [out] pSubPriority Subpriority value (starting from 0).
834
+ */
835
+ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
836
+ {
837
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
838
+ uint32_t PreemptPriorityBits;
839
+ uint32_t SubPriorityBits;
840
+
841
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
842
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
843
+
844
+ *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
845
+ *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
846
+ }
847
+
848
+
849
+
850
+ /**
851
+ \brief Set Interrupt Vector
852
+ \details Sets an interrupt vector in SRAM based interrupt vector table.
853
+ The interrupt number can be positive to specify a device specific interrupt,
854
+ or negative to specify a processor exception.
855
+ Address 0 must be mapped to SRAM.
856
+ \param [in] IRQn Interrupt number
857
+ \param [in] vector Address of interrupt handler function
858
+ */
859
+ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
860
+ {
861
+ uint32_t *vectors = (uint32_t *)0x0U;
862
+ vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;
863
+ /* ARM Application Note 321 states that the M1 does not require the architectural barrier */
864
+ }
865
+
866
+
867
+ /**
868
+ \brief Get Interrupt Vector
869
+ \details Reads an interrupt vector from interrupt vector table.
870
+ The interrupt number can be positive to specify a device specific interrupt,
871
+ or negative to specify a processor exception.
872
+ \param [in] IRQn Interrupt number.
873
+ \return Address of interrupt handler function
874
+ */
875
+ __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
876
+ {
877
+ uint32_t *vectors = (uint32_t *)0x0U;
878
+ return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];
879
+ }
880
+
881
+
882
+ /**
883
+ \brief System Reset
884
+ \details Initiates a system reset request to reset the MCU.
885
+ */
886
+ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
887
+ {
888
+ __DSB(); /* Ensure all outstanding memory accesses included
889
+ buffered write are completed before reset */
890
+ SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
891
+ SCB_AIRCR_SYSRESETREQ_Msk);
892
+ __DSB(); /* Ensure completion of memory access */
893
+
894
+ for(;;) /* wait until reset */
895
+ {
896
+ __NOP();
897
+ }
898
+ }
899
+
900
+ /*@} end of CMSIS_Core_NVICFunctions */
901
+
902
+
903
+ /* ########################## FPU functions #################################### */
904
+ /**
905
+ \ingroup CMSIS_Core_FunctionInterface
906
+ \defgroup CMSIS_Core_FpuFunctions FPU Functions
907
+ \brief Function that provides FPU type.
908
+ @{
909
+ */
910
+
911
+ /**
912
+ \brief get FPU type
913
+ \details returns the FPU type
914
+ \returns
915
+ - \b 0: No FPU
916
+ - \b 1: Single precision FPU
917
+ - \b 2: Double + Single precision FPU
918
+ */
919
+ __STATIC_INLINE uint32_t SCB_GetFPUType(void)
920
+ {
921
+ return 0U; /* No FPU */
922
+ }
923
+
924
+
925
+ /*@} end of CMSIS_Core_FpuFunctions */
926
+
927
+
928
+
929
+ /* ################################## SysTick function ############################################ */
930
+ /**
931
+ \ingroup CMSIS_Core_FunctionInterface
932
+ \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
933
+ \brief Functions that configure the System.
934
+ @{
935
+ */
936
+
937
+ #if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
938
+
939
+ /**
940
+ \brief System Tick Configuration
941
+ \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
942
+ Counter is in free running mode to generate periodic interrupts.
943
+ \param [in] ticks Number of ticks between two interrupts.
944
+ \return 0 Function succeeded.
945
+ \return 1 Function failed.
946
+ \note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
947
+ function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
948
+ must contain a vendor-specific implementation of this function.
949
+ */
950
+ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
951
+ {
952
+ if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
953
+ {
954
+ return (1UL); /* Reload value impossible */
955
+ }
956
+
957
+ SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
958
+ NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
959
+ SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
960
+ SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
961
+ SysTick_CTRL_TICKINT_Msk |
962
+ SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
963
+ return (0UL); /* Function successful */
964
+ }
965
+
966
+ #endif
967
+
968
+ /*@} end of CMSIS_Core_SysTickFunctions */
969
+
970
+
971
+
972
+
973
+ #ifdef __cplusplus
974
+ }
975
+ #endif
976
+
977
+ #endif /* __CORE_CM1_H_DEPENDANT */
978
+
979
+ #endif /* __CMSIS_GENERIC */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm23.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm3.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm33.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm35p.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm4.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm55.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_cm7.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_sc000.h ADDED
@@ -0,0 +1,1030 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**************************************************************************//**
2
+ * @file core_sc000.h
3
+ * @brief CMSIS SC000 Core Peripheral Access Layer Header File
4
+ * @version V5.0.7
5
+ * @date 27. March 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef __CORE_SC000_H_GENERIC
32
+ #define __CORE_SC000_H_GENERIC
33
+
34
+ #include <stdint.h>
35
+
36
+ #ifdef __cplusplus
37
+ extern "C" {
38
+ #endif
39
+
40
+ /**
41
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
42
+ CMSIS violates the following MISRA-C:2004 rules:
43
+
44
+ \li Required Rule 8.5, object/function definition in header file.<br>
45
+ Function definitions in header files are used to allow 'inlining'.
46
+
47
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
48
+ Unions are used for effective representation of core registers.
49
+
50
+ \li Advisory Rule 19.7, Function-like macro defined.<br>
51
+ Function-like macros are used to allow more efficient code.
52
+ */
53
+
54
+
55
+ /*******************************************************************************
56
+ * CMSIS definitions
57
+ ******************************************************************************/
58
+ /**
59
+ \ingroup SC000
60
+ @{
61
+ */
62
+
63
+ #include "cmsis_version.h"
64
+
65
+ /* CMSIS SC000 definitions */
66
+ #define __SC000_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */
67
+ #define __SC000_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */
68
+ #define __SC000_CMSIS_VERSION ((__SC000_CMSIS_VERSION_MAIN << 16U) | \
69
+ __SC000_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */
70
+
71
+ #define __CORTEX_SC (000U) /*!< Cortex secure core */
72
+
73
+ /** __FPU_USED indicates whether an FPU is used or not.
74
+ This core does not support an FPU at all
75
+ */
76
+ #define __FPU_USED 0U
77
+
78
+ #if defined ( __CC_ARM )
79
+ #if defined __TARGET_FPU_VFP
80
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
81
+ #endif
82
+
83
+ #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
84
+ #if defined __ARM_FP
85
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
86
+ #endif
87
+
88
+ #elif defined ( __GNUC__ )
89
+ #if defined (__VFP_FP__) && !defined(__SOFTFP__)
90
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
91
+ #endif
92
+
93
+ #elif defined ( __ICCARM__ )
94
+ #if defined __ARMVFP__
95
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
96
+ #endif
97
+
98
+ #elif defined ( __TI_ARM__ )
99
+ #if defined __TI_VFP_SUPPORT__
100
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
101
+ #endif
102
+
103
+ #elif defined ( __TASKING__ )
104
+ #if defined __FPU_VFP__
105
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
106
+ #endif
107
+
108
+ #elif defined ( __CSMC__ )
109
+ #if ( __CSMC__ & 0x400U)
110
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
111
+ #endif
112
+
113
+ #endif
114
+
115
+ #include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" /* CMSIS compiler specific defines */
116
+
117
+
118
+ #ifdef __cplusplus
119
+ }
120
+ #endif
121
+
122
+ #endif /* __CORE_SC000_H_GENERIC */
123
+
124
+ #ifndef __CMSIS_GENERIC
125
+
126
+ #ifndef __CORE_SC000_H_DEPENDANT
127
+ #define __CORE_SC000_H_DEPENDANT
128
+
129
+ #ifdef __cplusplus
130
+ extern "C" {
131
+ #endif
132
+
133
+ /* check device defines and use defaults */
134
+ #if defined __CHECK_DEVICE_DEFINES
135
+ #ifndef __SC000_REV
136
+ #define __SC000_REV 0x0000U
137
+ #warning "__SC000_REV not defined in device header file; using default!"
138
+ #endif
139
+
140
+ #ifndef __MPU_PRESENT
141
+ #define __MPU_PRESENT 0U
142
+ #warning "__MPU_PRESENT not defined in device header file; using default!"
143
+ #endif
144
+
145
+ #ifndef __VTOR_PRESENT
146
+ #define __VTOR_PRESENT 0U
147
+ #warning "__VTOR_PRESENT not defined in device header file; using default!"
148
+ #endif
149
+
150
+ #ifndef __NVIC_PRIO_BITS
151
+ #define __NVIC_PRIO_BITS 2U
152
+ #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
153
+ #endif
154
+
155
+ #ifndef __Vendor_SysTickConfig
156
+ #define __Vendor_SysTickConfig 0U
157
+ #warning "__Vendor_SysTickConfig not defined in device header file; using default!"
158
+ #endif
159
+ #endif
160
+
161
+ /* IO definitions (access restrictions to peripheral registers) */
162
+ /**
163
+ \defgroup CMSIS_glob_defs CMSIS Global Defines
164
+
165
+ <strong>IO Type Qualifiers</strong> are used
166
+ \li to specify the access to peripheral variables.
167
+ \li for automatic generation of peripheral register debug information.
168
+ */
169
+ #ifdef __cplusplus
170
+ #define __I volatile /*!< Defines 'read only' permissions */
171
+ #else
172
+ #define __I volatile const /*!< Defines 'read only' permissions */
173
+ #endif
174
+ #define __O volatile /*!< Defines 'write only' permissions */
175
+ #define __IO volatile /*!< Defines 'read / write' permissions */
176
+
177
+ /* following defines should be used for structure members */
178
+ #define __IM volatile const /*! Defines 'read only' structure member permissions */
179
+ #define __OM volatile /*! Defines 'write only' structure member permissions */
180
+ #define __IOM volatile /*! Defines 'read / write' structure member permissions */
181
+
182
+ /*@} end of group SC000 */
183
+
184
+
185
+
186
+ /*******************************************************************************
187
+ * Register Abstraction
188
+ Core Register contain:
189
+ - Core Register
190
+ - Core NVIC Register
191
+ - Core SCB Register
192
+ - Core SysTick Register
193
+ - Core MPU Register
194
+ ******************************************************************************/
195
+ /**
196
+ \defgroup CMSIS_core_register Defines and Type Definitions
197
+ \brief Type definitions and defines for Cortex-M processor based devices.
198
+ */
199
+
200
+ /**
201
+ \ingroup CMSIS_core_register
202
+ \defgroup CMSIS_CORE Status and Control Registers
203
+ \brief Core Register type definitions.
204
+ @{
205
+ */
206
+
207
/**
  \brief  Union type to access the Application Program Status Register (APSR).
  \note   Bit-field layout mirrors the hardware register and must not change.
 */
typedef union
{
  struct
  {
    uint32_t _reserved0:28;              /*!< bit:  0..27  Reserved                           */
    uint32_t V:1;                        /*!< bit:      28  Overflow condition code flag       */
    uint32_t C:1;                        /*!< bit:      29  Carry condition code flag          */
    uint32_t Z:1;                        /*!< bit:      30  Zero condition code flag           */
    uint32_t N:1;                        /*!< bit:      31  Negative condition code flag       */
  } b;                                   /*!< Structure used for bit  access                  */
  uint32_t w;                            /*!< Type      used for word access                  */
} APSR_Type;
222
+
223
+ /* APSR Register Definitions */
224
+ #define APSR_N_Pos 31U /*!< APSR: N Position */
225
+ #define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
226
+
227
+ #define APSR_Z_Pos 30U /*!< APSR: Z Position */
228
+ #define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
229
+
230
+ #define APSR_C_Pos 29U /*!< APSR: C Position */
231
+ #define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
232
+
233
+ #define APSR_V_Pos 28U /*!< APSR: V Position */
234
+ #define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
235
+
236
+
237
/**
  \brief  Union type to access the Interrupt Program Status Register (IPSR).
  \note   Bit-field layout mirrors the hardware register and must not change.
 */
typedef union
{
  struct
  {
    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */
    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */
  } b;                                   /*!< Structure used for bit  access                  */
  uint32_t w;                            /*!< Type      used for word access                  */
} IPSR_Type;
249
+
250
+ /* IPSR Register Definitions */
251
+ #define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
252
+ #define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
253
+
254
+
255
/**
  \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).
  \note   Bit-field layout mirrors the hardware register and must not change.
 */
typedef union
{
  struct
  {
    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */
    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved                           */
    uint32_t T:1;                        /*!< bit:      24  Thumb bit        (read 0)          */
    uint32_t _reserved1:3;               /*!< bit: 25..27  Reserved                           */
    uint32_t V:1;                        /*!< bit:      28  Overflow condition code flag       */
    uint32_t C:1;                        /*!< bit:      29  Carry condition code flag          */
    uint32_t Z:1;                        /*!< bit:      30  Zero condition code flag           */
    uint32_t N:1;                        /*!< bit:      31  Negative condition code flag       */
  } b;                                   /*!< Structure used for bit  access                  */
  uint32_t w;                            /*!< Type      used for word access                  */
} xPSR_Type;
273
+
274
+ /* xPSR Register Definitions */
275
+ #define xPSR_N_Pos 31U /*!< xPSR: N Position */
276
+ #define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
277
+
278
+ #define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
279
+ #define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
280
+
281
+ #define xPSR_C_Pos 29U /*!< xPSR: C Position */
282
+ #define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
283
+
284
+ #define xPSR_V_Pos 28U /*!< xPSR: V Position */
285
+ #define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
286
+
287
+ #define xPSR_T_Pos 24U /*!< xPSR: T Position */
288
+ #define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
289
+
290
+ #define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
291
+ #define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
292
+
293
+
294
/**
  \brief  Union type to access the Control Registers (CONTROL).
  \note   Bit-field layout mirrors the hardware register and must not change.
 */
typedef union
{
  struct
  {
    uint32_t _reserved0:1;               /*!< bit:      0  Reserved                           */
    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */
    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved                           */
  } b;                                   /*!< Structure used for bit  access                  */
  uint32_t w;                            /*!< Type      used for word access                  */
} CONTROL_Type;
307
+
308
+ /* CONTROL Register Definitions */
309
+ #define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
310
+ #define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
311
+
312
+ /*@} end of group CMSIS_CORE */
313
+
314
+
315
+ /**
316
+ \ingroup CMSIS_core_register
317
+ \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
318
+ \brief Type definitions for the NVIC Registers
319
+ @{
320
+ */
321
+
322
+ /**
323
+ \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
324
+ */
325
+ typedef struct
326
+ {
327
+ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
328
+ uint32_t RESERVED0[31U];
329
+ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
330
+ uint32_t RSERVED1[31U];
331
+ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
332
+ uint32_t RESERVED2[31U];
333
+ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
334
+ uint32_t RESERVED3[31U];
335
+ uint32_t RESERVED4[64U];
336
+ __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
337
+ } NVIC_Type;
338
+
339
+ /*@} end of group CMSIS_NVIC */
340
+
341
+
342
+ /**
343
+ \ingroup CMSIS_core_register
344
+ \defgroup CMSIS_SCB System Control Block (SCB)
345
+ \brief Type definitions for the System Control Block Registers
346
+ @{
347
+ */
348
+
349
+ /**
350
+ \brief Structure type to access the System Control Block (SCB).
351
+ */
352
+ typedef struct
353
+ {
354
+ __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
355
+ __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
356
+ __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */
357
+ __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
358
+ __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
359
+ __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
360
+ uint32_t RESERVED0[1U];
361
+ __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
362
+ __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
363
+ uint32_t RESERVED1[154U];
364
+ __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */
365
+ } SCB_Type;
366
+
367
+ /* SCB CPUID Register Definitions */
368
+ #define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
369
+ #define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
370
+
371
+ #define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
372
+ #define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
373
+
374
+ #define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
375
+ #define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
376
+
377
+ #define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
378
+ #define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
379
+
380
+ #define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
381
+ #define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
382
+
383
+ /* SCB Interrupt Control State Register Definitions */
384
+ #define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
385
+ #define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
386
+
387
+ #define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
388
+ #define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
389
+
390
+ #define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
391
+ #define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
392
+
393
+ #define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
394
+ #define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
395
+
396
+ #define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
397
+ #define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
398
+
399
+ #define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
400
+ #define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
401
+
402
+ #define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
403
+ #define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
404
+
405
+ #define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
406
+ #define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
407
+
408
+ #define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
409
+ #define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
410
+
411
+ /* SCB Interrupt Control State Register Definitions */
412
+ #define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */
413
+ #define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */
414
+
415
+ /* SCB Application Interrupt and Reset Control Register Definitions */
416
+ #define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
417
+ #define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
418
+
419
+ #define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
420
+ #define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
421
+
422
+ #define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
423
+ #define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
424
+
425
+ #define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
426
+ #define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
427
+
428
+ #define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
429
+ #define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
430
+
431
+ /* SCB System Control Register Definitions */
432
+ #define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
433
+ #define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
434
+
435
+ #define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
436
+ #define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
437
+
438
+ #define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
439
+ #define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
440
+
441
+ /* SCB Configuration Control Register Definitions */
442
+ #define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
443
+ #define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
444
+
445
+ #define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
446
+ #define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
447
+
448
+ /* SCB System Handler Control and State Register Definitions */
449
+ #define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
450
+ #define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
451
+
452
+ /*@} end of group CMSIS_SCB */
453
+
454
+
455
+ /**
456
+ \ingroup CMSIS_core_register
457
+ \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)
458
+ \brief Type definitions for the System Control and ID Register not in the SCB
459
+ @{
460
+ */
461
+
462
+ /**
463
+ \brief Structure type to access the System Control and ID Register not in the SCB.
464
+ */
465
+ typedef struct
466
+ {
467
+ uint32_t RESERVED0[2U];
468
+ __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */
469
+ } SCnSCB_Type;
470
+
471
+ /* Auxiliary Control Register Definitions */
472
+ #define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */
473
+ #define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */
474
+
475
+ /*@} end of group CMSIS_SCnotSCB */
476
+
477
+
478
+ /**
479
+ \ingroup CMSIS_core_register
480
+ \defgroup CMSIS_SysTick System Tick Timer (SysTick)
481
+ \brief Type definitions for the System Timer Registers.
482
+ @{
483
+ */
484
+
485
+ /**
486
+ \brief Structure type to access the System Timer (SysTick).
487
+ */
488
+ typedef struct
489
+ {
490
+ __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
491
+ __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
492
+ __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
493
+ __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
494
+ } SysTick_Type;
495
+
496
+ /* SysTick Control / Status Register Definitions */
497
+ #define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
498
+ #define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
499
+
500
+ #define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
501
+ #define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
502
+
503
+ #define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
504
+ #define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
505
+
506
+ #define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
507
+ #define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
508
+
509
+ /* SysTick Reload Register Definitions */
510
+ #define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
511
+ #define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
512
+
513
+ /* SysTick Current Register Definitions */
514
+ #define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
515
+ #define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
516
+
517
+ /* SysTick Calibration Register Definitions */
518
+ #define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
519
+ #define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
520
+
521
+ #define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
522
+ #define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
523
+
524
+ #define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
525
+ #define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
526
+
527
+ /*@} end of group CMSIS_SysTick */
528
+
529
+ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
530
+ /**
531
+ \ingroup CMSIS_core_register
532
+ \defgroup CMSIS_MPU Memory Protection Unit (MPU)
533
+ \brief Type definitions for the Memory Protection Unit (MPU)
534
+ @{
535
+ */
536
+
537
+ /**
538
+ \brief Structure type to access the Memory Protection Unit (MPU).
539
+ */
540
+ typedef struct
541
+ {
542
+ __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */
543
+ __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */
544
+ __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */
545
+ __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */
546
+ __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */
547
+ } MPU_Type;
548
+
549
+ /* MPU Type Register Definitions */
550
+ #define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */
551
+ #define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */
552
+
553
+ #define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */
554
+ #define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */
555
+
556
+ #define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */
557
+ #define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */
558
+
559
+ /* MPU Control Register Definitions */
560
+ #define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */
561
+ #define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */
562
+
563
+ #define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */
564
+ #define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */
565
+
566
+ #define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */
567
+ #define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */
568
+
569
+ /* MPU Region Number Register Definitions */
570
+ #define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */
571
+ #define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */
572
+
573
+ /* MPU Region Base Address Register Definitions */
574
+ #define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */
575
+ #define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */
576
+
577
+ #define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */
578
+ #define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */
579
+
580
+ #define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */
581
+ #define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */
582
+
583
+ /* MPU Region Attribute and Size Register Definitions */
584
+ #define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */
585
+ #define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */
586
+
587
+ #define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */
588
+ #define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */
589
+
590
+ #define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */
591
+ #define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */
592
+
593
+ #define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */
594
+ #define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */
595
+
596
+ #define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */
597
+ #define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */
598
+
599
+ #define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */
600
+ #define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */
601
+
602
+ #define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */
603
+ #define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */
604
+
605
+ #define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */
606
+ #define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */
607
+
608
+ #define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */
609
+ #define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */
610
+
611
+ #define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */
612
+ #define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */
613
+
614
+ /*@} end of group CMSIS_MPU */
615
+ #endif
616
+
617
+
618
+ /**
619
+ \ingroup CMSIS_core_register
620
+ \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
621
+ \brief SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor.
622
+ Therefore they are not covered by the SC000 header file.
623
+ @{
624
+ */
625
+ /*@} end of group CMSIS_CoreDebug */
626
+
627
+
628
+ /**
629
+ \ingroup CMSIS_core_register
630
+ \defgroup CMSIS_core_bitfield Core register bit field macros
631
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
632
+ @{
633
+ */
634
+
635
+ /**
636
+ \brief Mask and shift a bit field value for use in a register bit range.
637
+ \param[in] field Name of the register bit field.
638
+ \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type.
639
+ \return Masked and shifted value.
640
+ */
641
+ #define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
642
+
643
+ /**
644
+ \brief Mask and shift a register value to extract a bit filed value.
645
+ \param[in] field Name of the register bit field.
646
+ \param[in] value Value of register. This parameter is interpreted as an uint32_t type.
647
+ \return Masked and shifted bit field value.
648
+ */
649
+ #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
650
+
651
+ /*@} end of group CMSIS_core_bitfield */
652
+
653
+
654
+ /**
655
+ \ingroup CMSIS_core_register
656
+ \defgroup CMSIS_core_base Core Definitions
657
+ \brief Definitions for base addresses, unions, and structures.
658
+ @{
659
+ */
660
+
661
+ /* Memory mapping of Core Hardware */
662
+ #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
663
+ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
664
+ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
665
+ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
666
+
667
+ #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */
668
+ #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
669
+ #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
670
+ #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
671
+
672
+ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
673
+ #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */
674
+ #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */
675
+ #endif
676
+
677
+ /*@} */
678
+
679
+
680
+
681
+ /*******************************************************************************
682
+ * Hardware Abstraction Layer
683
+ Core Function Interface contains:
684
+ - Core NVIC Functions
685
+ - Core SysTick Functions
686
+ - Core Register Access Functions
687
+ ******************************************************************************/
688
+ /**
689
+ \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
690
+ */
691
+
692
+
693
+
694
+ /* ########################## NVIC functions #################################### */
695
+ /**
696
+ \ingroup CMSIS_Core_FunctionInterface
697
+ \defgroup CMSIS_Core_NVICFunctions NVIC Functions
698
+ \brief Functions that manage interrupts and exceptions via the NVIC.
699
+ @{
700
+ */
701
+
702
+ #ifdef CMSIS_NVIC_VIRTUAL
703
+ #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
704
+ #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
705
+ #endif
706
+ #include CMSIS_NVIC_VIRTUAL_HEADER_FILE
707
+ #else
708
+ /*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for SC000 */
709
+ /*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for SC000 */
710
+ #define NVIC_EnableIRQ __NVIC_EnableIRQ
711
+ #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ
712
+ #define NVIC_DisableIRQ __NVIC_DisableIRQ
713
+ #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ
714
+ #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ
715
+ #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ
716
+ /*#define NVIC_GetActive __NVIC_GetActive not available for SC000 */
717
+ #define NVIC_SetPriority __NVIC_SetPriority
718
+ #define NVIC_GetPriority __NVIC_GetPriority
719
+ #define NVIC_SystemReset __NVIC_SystemReset
720
+ #endif /* CMSIS_NVIC_VIRTUAL */
721
+
722
+ #ifdef CMSIS_VECTAB_VIRTUAL
723
+ #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
724
+ #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
725
+ #endif
726
+ #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
727
+ #else
728
+ #define NVIC_SetVector __NVIC_SetVector
729
+ #define NVIC_GetVector __NVIC_GetVector
730
+ #endif /* (CMSIS_VECTAB_VIRTUAL) */
731
+
732
+ #define NVIC_USER_IRQ_OFFSET 16
733
+
734
+
735
+ /* The following EXC_RETURN values are saved the LR on exception entry */
736
+ #define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */
737
+ #define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */
738
+ #define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */
739
+
740
+
741
+ /* Interrupt Priorities are WORD accessible only under Armv6-M */
742
+ /* The following MACROS handle generation of the register offset and byte masks */
743
+ #define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
744
+ #define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
745
+ #define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
746
+
747
+
748
+ /**
749
+ \brief Enable Interrupt
750
+ \details Enables a device specific interrupt in the NVIC interrupt controller.
751
+ \param [in] IRQn Device specific interrupt number.
752
+ \note IRQn must not be negative.
753
+ */
754
+ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
755
+ {
756
+ if ((int32_t)(IRQn) >= 0)
757
+ {
758
+ __COMPILER_BARRIER();
759
+ NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
760
+ __COMPILER_BARRIER();
761
+ }
762
+ }
763
+
764
+
765
+ /**
766
+ \brief Get Interrupt Enable status
767
+ \details Returns a device specific interrupt enable status from the NVIC interrupt controller.
768
+ \param [in] IRQn Device specific interrupt number.
769
+ \return 0 Interrupt is not enabled.
770
+ \return 1 Interrupt is enabled.
771
+ \note IRQn must not be negative.
772
+ */
773
+ __STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
774
+ {
775
+ if ((int32_t)(IRQn) >= 0)
776
+ {
777
+ return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
778
+ }
779
+ else
780
+ {
781
+ return(0U);
782
+ }
783
+ }
784
+
785
+
786
+ /**
787
+ \brief Disable Interrupt
788
+ \details Disables a device specific interrupt in the NVIC interrupt controller.
789
+ \param [in] IRQn Device specific interrupt number.
790
+ \note IRQn must not be negative.
791
+ */
792
+ __STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
793
+ {
794
+ if ((int32_t)(IRQn) >= 0)
795
+ {
796
+ NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
797
+ __DSB();
798
+ __ISB();
799
+ }
800
+ }
801
+
802
+
803
+ /**
804
+ \brief Get Pending Interrupt
805
+ \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
806
+ \param [in] IRQn Device specific interrupt number.
807
+ \return 0 Interrupt status is not pending.
808
+ \return 1 Interrupt status is pending.
809
+ \note IRQn must not be negative.
810
+ */
811
+ __STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
812
+ {
813
+ if ((int32_t)(IRQn) >= 0)
814
+ {
815
+ return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
816
+ }
817
+ else
818
+ {
819
+ return(0U);
820
+ }
821
+ }
822
+
823
+
824
+ /**
825
+ \brief Set Pending Interrupt
826
+ \details Sets the pending bit of a device specific interrupt in the NVIC pending register.
827
+ \param [in] IRQn Device specific interrupt number.
828
+ \note IRQn must not be negative.
829
+ */
830
+ __STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
831
+ {
832
+ if ((int32_t)(IRQn) >= 0)
833
+ {
834
+ NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
835
+ }
836
+ }
837
+
838
+
839
+ /**
840
+ \brief Clear Pending Interrupt
841
+ \details Clears the pending bit of a device specific interrupt in the NVIC pending register.
842
+ \param [in] IRQn Device specific interrupt number.
843
+ \note IRQn must not be negative.
844
+ */
845
+ __STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
846
+ {
847
+ if ((int32_t)(IRQn) >= 0)
848
+ {
849
+ NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
850
+ }
851
+ }
852
+
853
+
854
+ /**
855
+ \brief Set Interrupt Priority
856
+ \details Sets the priority of a device specific interrupt or a processor exception.
857
+ The interrupt number can be positive to specify a device specific interrupt,
858
+ or negative to specify a processor exception.
859
+ \param [in] IRQn Interrupt number.
860
+ \param [in] priority Priority to set.
861
+ \note The priority cannot be set for every processor exception.
862
+ */
863
+ __STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
864
+ {
865
+ if ((int32_t)(IRQn) >= 0)
866
+ {
867
+ NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
868
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
869
+ }
870
+ else
871
+ {
872
+ SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
873
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
874
+ }
875
+ }
876
+
877
+
878
+ /**
879
+ \brief Get Interrupt Priority
880
+ \details Reads the priority of a device specific interrupt or a processor exception.
881
+ The interrupt number can be positive to specify a device specific interrupt,
882
+ or negative to specify a processor exception.
883
+ \param [in] IRQn Interrupt number.
884
+ \return Interrupt Priority.
885
+ Value is aligned automatically to the implemented priority bits of the microcontroller.
886
+ */
887
+ __STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
888
+ {
889
+
890
+ if ((int32_t)(IRQn) >= 0)
891
+ {
892
+ return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
893
+ }
894
+ else
895
+ {
896
+ return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
897
+ }
898
+ }
899
+
900
+
901
+ /**
902
+ \brief Set Interrupt Vector
903
+ \details Sets an interrupt vector in SRAM based interrupt vector table.
904
+ The interrupt number can be positive to specify a device specific interrupt,
905
+ or negative to specify a processor exception.
906
+ VTOR must been relocated to SRAM before.
907
+ \param [in] IRQn Interrupt number
908
+ \param [in] vector Address of interrupt handler function
909
+ */
910
+ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
911
+ {
912
+ uint32_t *vectors = (uint32_t *)SCB->VTOR;
913
+ vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;
914
+ /* ARM Application Note 321 states that the M0 and M0+ do not require the architectural barrier - assume SC000 is the same */
915
+ }
916
+
917
+
918
+ /**
919
+ \brief Get Interrupt Vector
920
+ \details Reads an interrupt vector from interrupt vector table.
921
+ The interrupt number can be positive to specify a device specific interrupt,
922
+ or negative to specify a processor exception.
923
+ \param [in] IRQn Interrupt number.
924
+ \return Address of interrupt handler function
925
+ */
926
+ __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
927
+ {
928
+ uint32_t *vectors = (uint32_t *)SCB->VTOR;
929
+ return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];
930
+ }
931
+
932
+
933
+ /**
934
+ \brief System Reset
935
+ \details Initiates a system reset request to reset the MCU.
936
+ */
937
+ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
938
+ {
939
+ __DSB(); /* Ensure all outstanding memory accesses included
940
+ buffered write are completed before reset */
941
+ SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
942
+ SCB_AIRCR_SYSRESETREQ_Msk);
943
+ __DSB(); /* Ensure completion of memory access */
944
+
945
+ for(;;) /* wait until reset */
946
+ {
947
+ __NOP();
948
+ }
949
+ }
950
+
951
+ /*@} end of CMSIS_Core_NVICFunctions */
952
+
953
+
954
+ /* ########################## FPU functions #################################### */
955
+ /**
956
+ \ingroup CMSIS_Core_FunctionInterface
957
+ \defgroup CMSIS_Core_FpuFunctions FPU Functions
958
+ \brief Function that provides FPU type.
959
+ @{
960
+ */
961
+
962
+ /**
963
+ \brief get FPU type
964
+ \details returns the FPU type
965
+ \returns
966
+ - \b 0: No FPU
967
+ - \b 1: Single precision FPU
968
+ - \b 2: Double + Single precision FPU
969
+ */
970
+ __STATIC_INLINE uint32_t SCB_GetFPUType(void)
971
+ {
972
+ return 0U; /* No FPU */
973
+ }
974
+
975
+
976
+ /*@} end of CMSIS_Core_FpuFunctions */
977
+
978
+
979
+
980
+ /* ################################## SysTick function ############################################ */
981
+ /**
982
+ \ingroup CMSIS_Core_FunctionInterface
983
+ \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
984
+ \brief Functions that configure the System.
985
+ @{
986
+ */
987
+
988
+ #if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
989
+
990
+ /**
991
+ \brief System Tick Configuration
992
+ \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
993
+ Counter is in free running mode to generate periodic interrupts.
994
+ \param [in] ticks Number of ticks between two interrupts.
995
+ \return 0 Function succeeded.
996
+ \return 1 Function failed.
997
+ \note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
998
+ function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
999
+ must contain a vendor-specific implementation of this function.
1000
+ */
1001
+ __STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
1002
+ {
1003
+ if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
1004
+ {
1005
+ return (1UL); /* Reload value impossible */
1006
+ }
1007
+
1008
+ SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
1009
+ NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
1010
+ SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
1011
+ SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
1012
+ SysTick_CTRL_TICKINT_Msk |
1013
+ SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
1014
+ return (0UL); /* Function successful */
1015
+ }
1016
+
1017
+ #endif
1018
+
1019
+ /*@} end of CMSIS_Core_SysTickFunctions */
1020
+
1021
+
1022
+
1023
+
1024
+ #ifdef __cplusplus
1025
+ }
1026
+ #endif
1027
+
1028
+ #endif /* __CORE_SC000_H_DEPENDANT */
1029
+
1030
+ #endif /* __CMSIS_GENERIC */
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/core_sc300.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv7.h ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file mpu_armv7.h
3
+ * @brief CMSIS MPU API for Armv7-M MPU
4
+ * @version V5.1.2
5
+ * @date 25. May 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2017-2020 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef ARM_MPU_ARMV7_H
32
+ #define ARM_MPU_ARMV7_H
33
+
34
+ #define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes
35
+ #define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes
36
+ #define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes
37
+ #define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes
38
+ #define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes
39
+ #define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte
40
+ #define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes
41
+ #define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes
42
+ #define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes
43
+ #define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes
44
+ #define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes
45
+ #define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes
46
+ #define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes
47
+ #define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes
48
+ #define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes
49
+ #define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte
50
+ #define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes
51
+ #define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes
52
+ #define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes
53
+ #define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes
54
+ #define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes
55
+ #define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes
56
+ #define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes
57
+ #define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes
58
+ #define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes
59
+ #define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte
60
+ #define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes
61
+ #define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes
62
+
63
+ #define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access
64
+ #define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only
65
+ #define ARM_MPU_AP_URO 2U ///!< MPU Access Permission unprivileged access read-only
66
+ #define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access
67
+ #define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access read-only
68
+ #define ARM_MPU_AP_RO 6U ///!< MPU Access Permission read-only access
69
+
70
+ /** MPU Region Base Address Register Value
71
+ *
72
+ * \param Region The region to be configured, number 0 to 15.
73
+ * \param BaseAddress The base address for the region.
74
+ */
75
+ #define ARM_MPU_RBAR(Region, BaseAddress) \
76
+ (((BaseAddress) & MPU_RBAR_ADDR_Msk) | \
77
+ ((Region) & MPU_RBAR_REGION_Msk) | \
78
+ (MPU_RBAR_VALID_Msk))
79
+
80
+ /**
81
+ * MPU Memory Access Attributes
82
+ *
83
+ * \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
84
+ * \param IsShareable Region is shareable between multiple bus masters.
85
+ * \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
86
+ * \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
87
+ */
88
+ #define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \
89
+ ((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \
90
+ (((IsShareable) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \
91
+ (((IsCacheable) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \
92
+ (((IsBufferable) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk))
93
+
94
+ /**
95
+ * MPU Region Attribute and Size Register Value
96
+ *
97
+ * \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
98
+ * \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
99
+ * \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_.
100
+ * \param SubRegionDisable Sub-region disable field.
101
+ * \param Size Region size of the region to be configured, for example 4K, 8K.
102
+ */
103
+ #define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \
104
+ ((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \
105
+ (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \
106
+ (((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \
107
+ (((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \
108
+ (((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \
109
+ (((MPU_RASR_ENABLE_Msk))))
110
+
111
+ /**
112
+ * MPU Region Attribute and Size Register Value
113
+ *
114
+ * \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
115
+ * \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
116
+ * \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
117
+ * \param IsShareable Region is shareable between multiple bus masters.
118
+ * \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
119
+ * \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
120
+ * \param SubRegionDisable Sub-region disable field.
121
+ * \param Size Region size of the region to be configured, for example 4K, 8K.
122
+ */
123
+ #define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \
124
+ ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size)
125
+
126
+ /**
127
+ * MPU Memory Access Attribute for strongly ordered memory.
128
+ * - TEX: 000b
129
+ * - Shareable
130
+ * - Non-cacheable
131
+ * - Non-bufferable
132
+ */
133
+ #define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U)
134
+
135
+ /**
136
+ * MPU Memory Access Attribute for device memory.
137
+ * - TEX: 000b (if shareable) or 010b (if non-shareable)
138
+ * - Shareable or non-shareable
139
+ * - Non-cacheable
140
+ * - Bufferable (if shareable) or non-bufferable (if non-shareable)
141
+ *
142
+ * \param IsShareable Configures the device memory as shareable or non-shareable.
143
+ */
144
+ #define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U))
145
+
146
+ /**
147
+ * MPU Memory Access Attribute for normal memory.
148
+ * - TEX: 1BBb (reflecting outer cacheability rules)
149
+ * - Shareable or non-shareable
150
+ * - Cacheable or non-cacheable (reflecting inner cacheability rules)
151
+ * - Bufferable or non-bufferable (reflecting inner cacheability rules)
152
+ *
153
+ * \param OuterCp Configures the outer cache policy.
154
+ * \param InnerCp Configures the inner cache policy.
155
+ * \param IsShareable Configures the memory as shareable or non-shareable.
156
+ */
157
+ #define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) >> 1U), ((InnerCp) & 1U))
158
+
159
+ /**
160
+ * MPU Memory Access Attribute non-cacheable policy.
161
+ */
162
+ #define ARM_MPU_CACHEP_NOCACHE 0U
163
+
164
+ /**
165
+ * MPU Memory Access Attribute write-back, write and read allocate policy.
166
+ */
167
+ #define ARM_MPU_CACHEP_WB_WRA 1U
168
+
169
+ /**
170
+ * MPU Memory Access Attribute write-through, no write allocate policy.
171
+ */
172
+ #define ARM_MPU_CACHEP_WT_NWA 2U
173
+
174
+ /**
175
+ * MPU Memory Access Attribute write-back, no write allocate policy.
176
+ */
177
+ #define ARM_MPU_CACHEP_WB_NWA 3U
178
+
179
+
180
+ /**
181
+ * Struct for a single MPU Region
182
+ */
183
+ typedef struct {
184
+ uint32_t RBAR; //!< The region base address register value (RBAR)
185
+ uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR
186
+ } ARM_MPU_Region_t;
187
+
188
+ /** Enable the MPU.
189
+ * \param MPU_Control Default access permissions for unconfigured regions.
190
+ */
191
+ __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
192
+ {
193
+ __DMB();
194
+ MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
195
+ #ifdef SCB_SHCSR_MEMFAULTENA_Msk
196
+ SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
197
+ #endif
198
+ __DSB();
199
+ __ISB();
200
+ }
201
+
202
+ /** Disable the MPU.
203
+ */
204
+ __STATIC_INLINE void ARM_MPU_Disable(void)
205
+ {
206
+ __DMB();
207
+ #ifdef SCB_SHCSR_MEMFAULTENA_Msk
208
+ SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
209
+ #endif
210
+ MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk;
211
+ __DSB();
212
+ __ISB();
213
+ }
214
+
215
+ /** Clear and disable the given MPU region.
216
+ * \param rnr Region number to be cleared.
217
+ */
218
+ __STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
219
+ {
220
+ MPU->RNR = rnr;
221
+ MPU->RASR = 0U;
222
+ }
223
+
224
+ /** Configure an MPU region.
225
+ * \param rbar Value for RBAR register.
226
+ * \param rasr Value for RASR register.
227
+ */
228
+ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
229
+ {
230
+ MPU->RBAR = rbar;
231
+ MPU->RASR = rasr;
232
+ }
233
+
234
+ /** Configure the given MPU region.
235
+ * \param rnr Region number to be configured.
236
+ * \param rbar Value for RBAR register.
237
+ * \param rasr Value for RASR register.
238
+ */
239
+ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr)
240
+ {
241
+ MPU->RNR = rnr;
242
+ MPU->RBAR = rbar;
243
+ MPU->RASR = rasr;
244
+ }
245
+
246
+ /** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_Load().
247
+ * \param dst Destination data is copied to.
248
+ * \param src Source data is copied from.
249
+ * \param len Amount of data words to be copied.
250
+ */
251
+ __STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
252
+ {
253
+ uint32_t i;
254
+ for (i = 0U; i < len; ++i)
255
+ {
256
+ dst[i] = src[i];
257
+ }
258
+ }
259
+
260
+ /** Load the given number of MPU regions from a table.
261
+ * \param table Pointer to the MPU configuration table.
262
+ * \param cnt Amount of regions to be configured.
263
+ */
264
+ __STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt)
265
+ {
266
+ const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
267
+ while (cnt > MPU_TYPE_RALIASES) {
268
+ ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize);
269
+ table += MPU_TYPE_RALIASES;
270
+ cnt -= MPU_TYPE_RALIASES;
271
+ }
272
+ ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize);
273
+ }
274
+
275
+ #endif
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv8.h ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file mpu_armv8.h
3
+ * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU
4
+ * @version V5.1.2
5
+ * @date 10. February 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2017-2020 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef ARM_MPU_ARMV8_H
32
+ #define ARM_MPU_ARMV8_H
33
+
34
+ /** \brief Attribute for device memory (outer only) */
35
+ #define ARM_MPU_ATTR_DEVICE ( 0U )
36
+
37
+ /** \brief Attribute for non-cacheable, normal memory */
38
+ #define ARM_MPU_ATTR_NON_CACHEABLE ( 4U )
39
+
40
+ /** \brief Attribute for normal memory (outer and inner)
41
+ * \param NT Non-Transient: Set to 1 for non-transient data.
42
+ * \param WB Write-Back: Set to 1 to use write-back update policy.
43
+ * \param RA Read Allocation: Set to 1 to use cache allocation on read miss.
44
+ * \param WA Write Allocation: Set to 1 to use cache allocation on write miss.
45
+ */
46
+ #define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \
47
+ ((((NT) & 1U) << 3U) | (((WB) & 1U) << 2U) | (((RA) & 1U) << 1U) | ((WA) & 1U))
48
+
49
+ /** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */
50
+ #define ARM_MPU_ATTR_DEVICE_nGnRnE (0U)
51
+
52
+ /** \brief Device memory type non Gathering, non Re-ordering, Early Write Acknowledgement */
53
+ #define ARM_MPU_ATTR_DEVICE_nGnRE (1U)
54
+
55
+ /** \brief Device memory type non Gathering, Re-ordering, Early Write Acknowledgement */
56
+ #define ARM_MPU_ATTR_DEVICE_nGRE (2U)
57
+
58
+ /** \brief Device memory type Gathering, Re-ordering, Early Write Acknowledgement */
59
+ #define ARM_MPU_ATTR_DEVICE_GRE (3U)
60
+
61
+ /** \brief Memory Attribute
62
+ * \param O Outer memory attributes
63
+ * \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes
64
+ */
65
+ #define ARM_MPU_ATTR(O, I) ((((O) & 0xFU) << 4U) | ((((O) & 0xFU) != 0U) ? ((I) & 0xFU) : (((I) & 0x3U) << 2U)))
66
+
67
+ /** \brief Normal memory non-shareable */
68
+ #define ARM_MPU_SH_NON (0U)
69
+
70
+ /** \brief Normal memory outer shareable */
71
+ #define ARM_MPU_SH_OUTER (2U)
72
+
73
+ /** \brief Normal memory inner shareable */
74
+ #define ARM_MPU_SH_INNER (3U)
75
+
76
+ /** \brief Memory access permissions
77
+ * \param RO Read-Only: Set to 1 for read-only memory.
78
+ * \param NP Non-Privileged: Set to 1 for non-privileged memory.
79
+ */
80
+ #define ARM_MPU_AP_(RO, NP) ((((RO) & 1U) << 1U) | ((NP) & 1U))
81
+
82
+ /** \brief Region Base Address Register value
83
+ * \param BASE The base address bits [31:5] of a memory region. The value is zero extended. Effective address gets 32 byte aligned.
84
+ * \param SH Defines the Shareability domain for this memory region.
85
+ * \param RO Read-Only: Set to 1 for a read-only memory region.
86
+ * \param NP Non-Privileged: Set to 1 for a non-privileged memory region.
87
+ * \oaram XN eXecute Never: Set to 1 for a non-executable memory region.
88
+ */
89
+ #define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \
90
+ (((BASE) & MPU_RBAR_BASE_Msk) | \
91
+ (((SH) << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \
92
+ ((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \
93
+ (((XN) << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk))
94
+
95
+ /** \brief Region Limit Address Register value
96
+ * \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
97
+ * \param IDX The attribute index to be associated with this memory region.
98
+ */
99
+ #define ARM_MPU_RLAR(LIMIT, IDX) \
100
+ (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \
101
+ (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
102
+ (MPU_RLAR_EN_Msk))
103
+
104
+ #if defined(MPU_RLAR_PXN_Pos)
105
+
106
+ /** \brief Region Limit Address Register with PXN value
107
+ * \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
108
+ * \param PXN Privileged execute never. Defines whether code can be executed from this privileged region.
109
+ * \param IDX The attribute index to be associated with this memory region.
110
+ */
111
+ #define ARM_MPU_RLAR_PXN(LIMIT, PXN, IDX) \
112
+ (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \
113
+ (((PXN) << MPU_RLAR_PXN_Pos) & MPU_RLAR_PXN_Msk) | \
114
+ (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
115
+ (MPU_RLAR_EN_Msk))
116
+
117
+ #endif
118
+
119
+ /**
120
+ * Struct for a single MPU Region
121
+ */
122
+ typedef struct {
123
+ uint32_t RBAR; /*!< Region Base Address Register value */
124
+ uint32_t RLAR; /*!< Region Limit Address Register value */
125
+ } ARM_MPU_Region_t;
126
+
127
+ /** Enable the MPU.
128
+ * \param MPU_Control Default access permissions for unconfigured regions.
129
+ */
130
+ __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
131
+ {
132
+ __DMB();
133
+ MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
134
+ #ifdef SCB_SHCSR_MEMFAULTENA_Msk
135
+ SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
136
+ #endif
137
+ __DSB();
138
+ __ISB();
139
+ }
140
+
141
+ /** Disable the MPU.
142
+ */
143
+ __STATIC_INLINE void ARM_MPU_Disable(void)
144
+ {
145
+ __DMB();
146
+ #ifdef SCB_SHCSR_MEMFAULTENA_Msk
147
+ SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
148
+ #endif
149
+ MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk;
150
+ __DSB();
151
+ __ISB();
152
+ }
153
+
154
+ #ifdef MPU_NS
155
+ /** Enable the Non-secure MPU.
156
+ * \param MPU_Control Default access permissions for unconfigured regions.
157
+ */
158
+ __STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control)
159
+ {
160
+ __DMB();
161
+ MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
162
+ #ifdef SCB_SHCSR_MEMFAULTENA_Msk
163
+ SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
164
+ #endif
165
+ __DSB();
166
+ __ISB();
167
+ }
168
+
169
+ /** Disable the Non-secure MPU.
170
+ */
171
+ __STATIC_INLINE void ARM_MPU_Disable_NS(void)
172
+ {
173
+ __DMB();
174
+ #ifdef SCB_SHCSR_MEMFAULTENA_Msk
175
+ SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
176
+ #endif
177
+ MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk;
178
+ __DSB();
179
+ __ISB();
180
+ }
181
+ #endif
182
+
183
+ /** Set the memory attribute encoding to the given MPU.
184
+ * \param mpu Pointer to the MPU to be configured.
185
+ * \param idx The attribute index to be set [0-7]
186
+ * \param attr The attribute value to be set.
187
+ */
188
+ __STATIC_INLINE void ARM_MPU_SetMemAttrEx(MPU_Type* mpu, uint8_t idx, uint8_t attr)
189
+ {
190
+ const uint8_t reg = idx / 4U;
191
+ const uint32_t pos = ((idx % 4U) * 8U);
192
+ const uint32_t mask = 0xFFU << pos;
193
+
194
+ if (reg >= (sizeof(mpu->MAIR) / sizeof(mpu->MAIR[0]))) {
195
+ return; // invalid index
196
+ }
197
+
198
+ mpu->MAIR[reg] = ((mpu->MAIR[reg] & ~mask) | ((attr << pos) & mask));
199
+ }
200
+
201
+ /** Set the memory attribute encoding.
202
+ * \param idx The attribute index to be set [0-7]
203
+ * \param attr The attribute value to be set.
204
+ */
205
+ __STATIC_INLINE void ARM_MPU_SetMemAttr(uint8_t idx, uint8_t attr)
206
+ {
207
+ ARM_MPU_SetMemAttrEx(MPU, idx, attr);
208
+ }
209
+
210
+ #ifdef MPU_NS
211
+ /** Set the memory attribute encoding to the Non-secure MPU.
212
+ * \param idx The attribute index to be set [0-7]
213
+ * \param attr The attribute value to be set.
214
+ */
215
+ __STATIC_INLINE void ARM_MPU_SetMemAttr_NS(uint8_t idx, uint8_t attr)
216
+ {
217
+ ARM_MPU_SetMemAttrEx(MPU_NS, idx, attr);
218
+ }
219
+ #endif
220
+
221
+ /** Clear and disable the given MPU region of the given MPU.
222
+ * \param mpu Pointer to MPU to be used.
223
+ * \param rnr Region number to be cleared.
224
+ */
225
+ __STATIC_INLINE void ARM_MPU_ClrRegionEx(MPU_Type* mpu, uint32_t rnr)
226
+ {
227
+ mpu->RNR = rnr;
228
+ mpu->RLAR = 0U;
229
+ }
230
+
231
+ /** Clear and disable the given MPU region.
232
+ * \param rnr Region number to be cleared.
233
+ */
234
+ __STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
235
+ {
236
+ ARM_MPU_ClrRegionEx(MPU, rnr);
237
+ }
238
+
239
+ #ifdef MPU_NS
240
+ /** Clear and disable the given Non-secure MPU region.
241
+ * \param rnr Region number to be cleared.
242
+ */
243
+ __STATIC_INLINE void ARM_MPU_ClrRegion_NS(uint32_t rnr)
244
+ {
245
+ ARM_MPU_ClrRegionEx(MPU_NS, rnr);
246
+ }
247
+ #endif
248
+
249
+ /** Configure the given MPU region of the given MPU.
250
+ * \param mpu Pointer to MPU to be used.
251
+ * \param rnr Region number to be configured.
252
+ * \param rbar Value for RBAR register.
253
+ * \param rlar Value for RLAR register.
254
+ */
255
+ __STATIC_INLINE void ARM_MPU_SetRegionEx(MPU_Type* mpu, uint32_t rnr, uint32_t rbar, uint32_t rlar)
256
+ {
257
+ mpu->RNR = rnr;
258
+ mpu->RBAR = rbar;
259
+ mpu->RLAR = rlar;
260
+ }
261
+
262
+ /** Configure the given MPU region.
263
+ * \param rnr Region number to be configured.
264
+ * \param rbar Value for RBAR register.
265
+ * \param rlar Value for RLAR register.
266
+ */
267
+ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rnr, uint32_t rbar, uint32_t rlar)
268
+ {
269
+ ARM_MPU_SetRegionEx(MPU, rnr, rbar, rlar);
270
+ }
271
+
272
+ #ifdef MPU_NS
273
+ /** Configure the given Non-secure MPU region.
274
+ * \param rnr Region number to be configured.
275
+ * \param rbar Value for RBAR register.
276
+ * \param rlar Value for RLAR register.
277
+ */
278
+ __STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t rlar)
279
+ {
280
+ ARM_MPU_SetRegionEx(MPU_NS, rnr, rbar, rlar);
281
+ }
282
+ #endif
283
+
284
+ /** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_LoadEx()
285
+ * \param dst Destination data is copied to.
286
+ * \param src Source data is copied from.
287
+ * \param len Amount of data words to be copied.
288
+ */
289
+ __STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
290
+ {
291
+ uint32_t i;
292
+ for (i = 0U; i < len; ++i)
293
+ {
294
+ dst[i] = src[i];
295
+ }
296
+ }
297
+
298
+ /** Load the given number of MPU regions from a table to the given MPU.
299
+ * \param mpu Pointer to the MPU registers to be used.
300
+ * \param rnr First region number to be configured.
301
+ * \param table Pointer to the MPU configuration table.
302
+ * \param cnt Amount of regions to be configured.
303
+ */
304
+ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt)
305
+ {
306
+ const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
307
+ if (cnt == 1U) {
308
+ mpu->RNR = rnr;
309
+ ARM_MPU_OrderedMemcpy(&(mpu->RBAR), &(table->RBAR), rowWordSize);
310
+ } else {
311
+ uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U);
312
+ uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES;
313
+
314
+ mpu->RNR = rnrBase;
315
+ while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) {
316
+ uint32_t c = MPU_TYPE_RALIASES - rnrOffset;
317
+ ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize);
318
+ table += c;
319
+ cnt -= c;
320
+ rnrOffset = 0U;
321
+ rnrBase += MPU_TYPE_RALIASES;
322
+ mpu->RNR = rnrBase;
323
+ }
324
+
325
+ ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize);
326
+ }
327
+ }
328
+
329
+ /** Load the given number of MPU regions from a table.
330
+ * \param rnr First region number to be configured.
331
+ * \param table Pointer to the MPU configuration table.
332
+ * \param cnt Amount of regions to be configured.
333
+ */
334
+ __STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt)
335
+ {
336
+ ARM_MPU_LoadEx(MPU, rnr, table, cnt);
337
+ }
338
+
339
+ #ifdef MPU_NS
340
+ /** Load the given number of MPU regions from a table to the Non-secure MPU.
341
+ * \param rnr First region number to be configured.
342
+ * \param table Pointer to the MPU configuration table.
343
+ * \param cnt Amount of regions to be configured.
344
+ */
345
+ __STATIC_INLINE void ARM_MPU_Load_NS(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt)
346
+ {
347
+ ARM_MPU_LoadEx(MPU_NS, rnr, table, cnt);
348
+ }
349
+ #endif
350
+
351
+ #endif
352
+
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/pmu_armv8.h ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file pmu_armv8.h
3
+ * @brief CMSIS PMU API for Armv8.1-M PMU
4
+ * @version V1.0.1
5
+ * @date 15. April 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2020 Arm Limited. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #if defined ( __ICCARM__ )
26
+ #pragma system_include /* treat file as system include file for MISRA check */
27
+ #elif defined (__clang__)
28
+ #pragma clang system_header /* treat file as system include file */
29
+ #endif
30
+
31
+ #ifndef ARM_PMU_ARMV8_H
32
+ #define ARM_PMU_ARMV8_H
33
+
34
+ /**
35
+ * \brief PMU Events
36
+ * \note See the Armv8.1-M Architecture Reference Manual for full details on these PMU events.
37
+ * */
38
+
39
+ #define ARM_PMU_SW_INCR 0x0000 /*!< Software update to the PMU_SWINC register, architecturally executed and condition code check pass */
40
+ #define ARM_PMU_L1I_CACHE_REFILL 0x0001 /*!< L1 I-Cache refill */
41
+ #define ARM_PMU_L1D_CACHE_REFILL 0x0003 /*!< L1 D-Cache refill */
42
+ #define ARM_PMU_L1D_CACHE 0x0004 /*!< L1 D-Cache access */
43
+ #define ARM_PMU_LD_RETIRED 0x0006 /*!< Memory-reading instruction architecturally executed and condition code check pass */
44
+ #define ARM_PMU_ST_RETIRED 0x0007 /*!< Memory-writing instruction architecturally executed and condition code check pass */
45
+ #define ARM_PMU_INST_RETIRED 0x0008 /*!< Instruction architecturally executed */
46
+ #define ARM_PMU_EXC_TAKEN 0x0009 /*!< Exception entry */
47
+ #define ARM_PMU_EXC_RETURN 0x000A /*!< Exception return instruction architecturally executed and the condition code check pass */
48
+ #define ARM_PMU_PC_WRITE_RETIRED 0x000C /*!< Software change to the Program Counter (PC). Instruction is architecturally executed and condition code check pass */
49
+ #define ARM_PMU_BR_IMMED_RETIRED 0x000D /*!< Immediate branch architecturally executed */
50
+ #define ARM_PMU_BR_RETURN_RETIRED 0x000E /*!< Function return instruction architecturally executed and the condition code check pass */
51
+ #define ARM_PMU_UNALIGNED_LDST_RETIRED 0x000F /*!< Unaligned memory memory-reading or memory-writing instruction architecturally executed and condition code check pass */
52
+ #define ARM_PMU_BR_MIS_PRED 0x0010 /*!< Mispredicted or not predicted branch speculatively executed */
53
+ #define ARM_PMU_CPU_CYCLES 0x0011 /*!< Cycle */
54
+ #define ARM_PMU_BR_PRED 0x0012 /*!< Predictable branch speculatively executed */
55
+ #define ARM_PMU_MEM_ACCESS 0x0013 /*!< Data memory access */
56
+ #define ARM_PMU_L1I_CACHE 0x0014 /*!< Level 1 instruction cache access */
57
+ #define ARM_PMU_L1D_CACHE_WB 0x0015 /*!< Level 1 data cache write-back */
58
+ #define ARM_PMU_L2D_CACHE 0x0016 /*!< Level 2 data cache access */
59
+ #define ARM_PMU_L2D_CACHE_REFILL 0x0017 /*!< Level 2 data cache refill */
60
+ #define ARM_PMU_L2D_CACHE_WB 0x0018 /*!< Level 2 data cache write-back */
61
+ #define ARM_PMU_BUS_ACCESS 0x0019 /*!< Bus access */
62
+ #define ARM_PMU_MEMORY_ERROR 0x001A /*!< Local memory error */
63
+ #define ARM_PMU_INST_SPEC 0x001B /*!< Instruction speculatively executed */
64
+ #define ARM_PMU_BUS_CYCLES 0x001D /*!< Bus cycles */
65
+ #define ARM_PMU_CHAIN 0x001E /*!< For an odd numbered counter, increment when an overflow occurs on the preceding even-numbered counter on the same PE */
66
+ #define ARM_PMU_L1D_CACHE_ALLOCATE 0x001F /*!< Level 1 data cache allocation without refill */
67
+ #define ARM_PMU_L2D_CACHE_ALLOCATE 0x0020 /*!< Level 2 data cache allocation without refill */
68
+ #define ARM_PMU_BR_RETIRED 0x0021 /*!< Branch instruction architecturally executed */
69
+ #define ARM_PMU_BR_MIS_PRED_RETIRED 0x0022 /*!< Mispredicted branch instruction architecturally executed */
70
+ #define ARM_PMU_STALL_FRONTEND 0x0023 /*!< No operation issued because of the frontend */
71
+ #define ARM_PMU_STALL_BACKEND 0x0024 /*!< No operation issued because of the backend */
72
+ #define ARM_PMU_L2I_CACHE 0x0027 /*!< Level 2 instruction cache access */
73
+ #define ARM_PMU_L2I_CACHE_REFILL 0x0028 /*!< Level 2 instruction cache refill */
74
+ #define ARM_PMU_L3D_CACHE_ALLOCATE 0x0029 /*!< Level 3 data cache allocation without refill */
75
+ #define ARM_PMU_L3D_CACHE_REFILL 0x002A /*!< Level 3 data cache refill */
76
+ #define ARM_PMU_L3D_CACHE 0x002B /*!< Level 3 data cache access */
77
+ #define ARM_PMU_L3D_CACHE_WB 0x002C /*!< Level 3 data cache write-back */
78
+ #define ARM_PMU_LL_CACHE_RD 0x0036 /*!< Last level data cache read */
79
+ #define ARM_PMU_LL_CACHE_MISS_RD 0x0037 /*!< Last level data cache read miss */
80
+ #define ARM_PMU_L1D_CACHE_MISS_RD 0x0039 /*!< Level 1 data cache read miss */
81
+ #define ARM_PMU_OP_COMPLETE 0x003A /*!< Operation retired */
82
+ #define ARM_PMU_OP_SPEC 0x003B /*!< Operation speculatively executed */
83
+ #define ARM_PMU_STALL 0x003C /*!< Stall cycle for instruction or operation not sent for execution */
84
+ #define ARM_PMU_STALL_OP_BACKEND 0x003D /*!< Stall cycle for instruction or operation not sent for execution due to pipeline backend */
85
+ #define ARM_PMU_STALL_OP_FRONTEND 0x003E /*!< Stall cycle for instruction or operation not sent for execution due to pipeline frontend */
86
+ #define ARM_PMU_STALL_OP 0x003F /*!< Instruction or operation slots not occupied each cycle */
87
+ #define ARM_PMU_L1D_CACHE_RD 0x0040 /*!< Level 1 data cache read */
88
+ #define ARM_PMU_LE_RETIRED 0x0100 /*!< Loop end instruction executed */
89
+ #define ARM_PMU_LE_SPEC 0x0101 /*!< Loop end instruction speculatively executed */
90
+ #define ARM_PMU_BF_RETIRED 0x0104 /*!< Branch future instruction architecturally executed and condition code check pass */
91
+ #define ARM_PMU_BF_SPEC 0x0105 /*!< Branch future instruction speculatively executed and condition code check pass */
92
+ #define ARM_PMU_LE_CANCEL 0x0108 /*!< Loop end instruction not taken */
93
+ #define ARM_PMU_BF_CANCEL 0x0109 /*!< Branch future instruction not taken */
94
+ #define ARM_PMU_SE_CALL_S 0x0114 /*!< Call to secure function, resulting in Security state change */
95
+ #define ARM_PMU_SE_CALL_NS 0x0115 /*!< Call to non-secure function, resulting in Security state change */
96
+ #define ARM_PMU_DWT_CMPMATCH0 0x0118 /*!< DWT comparator 0 match */
97
+ #define ARM_PMU_DWT_CMPMATCH1 0x0119 /*!< DWT comparator 1 match */
98
+ #define ARM_PMU_DWT_CMPMATCH2 0x011A /*!< DWT comparator 2 match */
99
+ #define ARM_PMU_DWT_CMPMATCH3 0x011B /*!< DWT comparator 3 match */
100
+ #define ARM_PMU_MVE_INST_RETIRED 0x0200 /*!< MVE instruction architecturally executed */
101
+ #define ARM_PMU_MVE_INST_SPEC 0x0201 /*!< MVE instruction speculatively executed */
102
+ #define ARM_PMU_MVE_FP_RETIRED 0x0204 /*!< MVE floating-point instruction architecturally executed */
103
+ #define ARM_PMU_MVE_FP_SPEC 0x0205 /*!< MVE floating-point instruction speculatively executed */
104
+ #define ARM_PMU_MVE_FP_HP_RETIRED 0x0208 /*!< MVE half-precision floating-point instruction architecturally executed */
105
+ #define ARM_PMU_MVE_FP_HP_SPEC 0x0209 /*!< MVE half-precision floating-point instruction speculatively executed */
106
+ #define ARM_PMU_MVE_FP_SP_RETIRED 0x020C /*!< MVE single-precision floating-point instruction architecturally executed */
107
+ #define ARM_PMU_MVE_FP_SP_SPEC 0x020D /*!< MVE single-precision floating-point instruction speculatively executed */
108
+ #define ARM_PMU_MVE_FP_MAC_RETIRED 0x0214 /*!< MVE floating-point multiply or multiply-accumulate instruction architecturally executed */
109
+ #define ARM_PMU_MVE_FP_MAC_SPEC 0x0215 /*!< MVE floating-point multiply or multiply-accumulate instruction speculatively executed */
110
+ #define ARM_PMU_MVE_INT_RETIRED 0x0224 /*!< MVE integer instruction architecturally executed */
111
+ #define ARM_PMU_MVE_INT_SPEC 0x0225 /*!< MVE integer instruction speculatively executed */
112
+ #define ARM_PMU_MVE_INT_MAC_RETIRED 0x0228 /*!< MVE multiply or multiply-accumulate instruction architecturally executed */
113
+ #define ARM_PMU_MVE_INT_MAC_SPEC 0x0229 /*!< MVE multiply or multiply-accumulate instruction speculatively executed */
114
+ #define ARM_PMU_MVE_LDST_RETIRED 0x0238 /*!< MVE load or store instruction architecturally executed */
115
+ #define ARM_PMU_MVE_LDST_SPEC 0x0239 /*!< MVE load or store instruction speculatively executed */
116
+ #define ARM_PMU_MVE_LD_RETIRED 0x023C /*!< MVE load instruction architecturally executed */
117
+ #define ARM_PMU_MVE_LD_SPEC 0x023D /*!< MVE load instruction speculatively executed */
118
+ #define ARM_PMU_MVE_ST_RETIRED 0x0240 /*!< MVE store instruction architecturally executed */
119
+ #define ARM_PMU_MVE_ST_SPEC 0x0241 /*!< MVE store instruction speculatively executed */
120
+ #define ARM_PMU_MVE_LDST_CONTIG_RETIRED 0x0244 /*!< MVE contiguous load or store instruction architecturally executed */
121
+ #define ARM_PMU_MVE_LDST_CONTIG_SPEC 0x0245 /*!< MVE contiguous load or store instruction speculatively executed */
122
+ #define ARM_PMU_MVE_LD_CONTIG_RETIRED 0x0248 /*!< MVE contiguous load instruction architecturally executed */
123
+ #define ARM_PMU_MVE_LD_CONTIG_SPEC 0x0249 /*!< MVE contiguous load instruction speculatively executed */
124
+ #define ARM_PMU_MVE_ST_CONTIG_RETIRED 0x024C /*!< MVE contiguous store instruction architecturally executed */
125
+ #define ARM_PMU_MVE_ST_CONTIG_SPEC 0x024D /*!< MVE contiguous store instruction speculatively executed */
126
+ #define ARM_PMU_MVE_LDST_NONCONTIG_RETIRED 0x0250 /*!< MVE non-contiguous load or store instruction architecturally executed */
127
+ #define ARM_PMU_MVE_LDST_NONCONTIG_SPEC 0x0251 /*!< MVE non-contiguous load or store instruction speculatively executed */
128
+ #define ARM_PMU_MVE_LD_NONCONTIG_RETIRED 0x0254 /*!< MVE non-contiguous load instruction architecturally executed */
129
+ #define ARM_PMU_MVE_LD_NONCONTIG_SPEC 0x0255 /*!< MVE non-contiguous load instruction speculatively executed */
130
+ #define ARM_PMU_MVE_ST_NONCONTIG_RETIRED 0x0258 /*!< MVE non-contiguous store instruction architecturally executed */
131
+ #define ARM_PMU_MVE_ST_NONCONTIG_SPEC 0x0259 /*!< MVE non-contiguous store instruction speculatively executed */
132
+ #define ARM_PMU_MVE_LDST_MULTI_RETIRED 0x025C /*!< MVE memory instruction targeting multiple registers architecturally executed */
133
/* MVE (Helium) and implementation-defined PMU event identifiers.
 *
 * Values are the architectural event numbers from the Armv8.1-M PMU
 * extension; they are written to a counter's EVTYPER register via
 * ARM_PMU_Set_EVTYPER() to select what that counter counts.
 * "retired" = architecturally executed, "spec" = speculatively executed.
 */
#define ARM_PMU_MVE_LDST_MULTI_SPEC                   0x025D /*!< MVE memory instruction targeting multiple registers speculatively executed */
#define ARM_PMU_MVE_LD_MULTI_RETIRED                  0x0260 /*!< MVE memory load instruction targeting multiple registers architecturally executed */
#define ARM_PMU_MVE_LD_MULTI_SPEC                     0x0261 /*!< MVE memory load instruction targeting multiple registers speculatively executed */
/* NOTE: fixed from 0x0261 (which duplicated ARM_PMU_MVE_LD_MULTI_SPEC).
 * Upstream CMSIS pmu_armv8.h defines the ST "retired" event as 0x0264,
 * matching the RETIRED/SPEC pairing (0x0264/0x0265) used below. */
#define ARM_PMU_MVE_ST_MULTI_RETIRED                  0x0264 /*!< MVE memory store instruction targeting multiple registers architecturally executed */
#define ARM_PMU_MVE_ST_MULTI_SPEC                     0x0265 /*!< MVE memory store instruction targeting multiple registers speculatively executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_RETIRED            0x028C /*!< MVE unaligned memory load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_SPEC               0x028D /*!< MVE unaligned memory load or store instruction speculatively executed */
#define ARM_PMU_MVE_LD_UNALIGNED_RETIRED              0x0290 /*!< MVE unaligned load instruction architecturally executed */
#define ARM_PMU_MVE_LD_UNALIGNED_SPEC                 0x0291 /*!< MVE unaligned load instruction speculatively executed */
#define ARM_PMU_MVE_ST_UNALIGNED_RETIRED              0x0294 /*!< MVE unaligned store instruction architecturally executed */
#define ARM_PMU_MVE_ST_UNALIGNED_SPEC                 0x0295 /*!< MVE unaligned store instruction speculatively executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_RETIRED  0x0298 /*!< MVE unaligned noncontiguous load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_SPEC     0x0299 /*!< MVE unaligned noncontiguous load or store instruction speculatively executed */
#define ARM_PMU_MVE_VREDUCE_RETIRED                   0x02A0 /*!< MVE vector reduction instruction architecturally executed */
#define ARM_PMU_MVE_VREDUCE_SPEC                      0x02A1 /*!< MVE vector reduction instruction speculatively executed */
#define ARM_PMU_MVE_VREDUCE_FP_RETIRED                0x02A4 /*!< MVE floating-point vector reduction instruction architecturally executed */
#define ARM_PMU_MVE_VREDUCE_FP_SPEC                   0x02A5 /*!< MVE floating-point vector reduction instruction speculatively executed */
#define ARM_PMU_MVE_VREDUCE_INT_RETIRED               0x02A8 /*!< MVE integer vector reduction instruction architecturally executed */
#define ARM_PMU_MVE_VREDUCE_INT_SPEC                  0x02A9 /*!< MVE integer vector reduction instruction speculatively executed */
#define ARM_PMU_MVE_PRED                              0x02B8 /*!< Cycles where one or more predicated beats architecturally executed */
#define ARM_PMU_MVE_STALL                             0x02CC /*!< Stall cycles caused by an MVE instruction */
#define ARM_PMU_MVE_STALL_RESOURCE                    0x02CD /*!< Stall cycles caused by an MVE instruction because of resource conflicts */
#define ARM_PMU_MVE_STALL_RESOURCE_MEM                0x02CE /*!< Stall cycles caused by an MVE instruction because of memory resource conflicts */
#define ARM_PMU_MVE_STALL_RESOURCE_FP                 0x02CF /*!< Stall cycles caused by an MVE instruction because of floating-point resource conflicts */
#define ARM_PMU_MVE_STALL_RESOURCE_INT                0x02D0 /*!< Stall cycles caused by an MVE instruction because of integer resource conflicts */
#define ARM_PMU_MVE_STALL_BREAK                       0x02D3 /*!< Stall cycles caused by an MVE chain break */
#define ARM_PMU_MVE_STALL_DEPENDENCY                  0x02D4 /*!< Stall cycles caused by MVE register dependency */
/* Implementation-defined events (0x4000 range): TCM, trace and
 * cross-trigger observation points. */
#define ARM_PMU_ITCM_ACCESS                           0x4007 /*!< Instruction TCM access */
#define ARM_PMU_DTCM_ACCESS                           0x4008 /*!< Data TCM access */
#define ARM_PMU_TRCEXTOUT0                            0x4010 /*!< ETM external output 0 */
#define ARM_PMU_TRCEXTOUT1                            0x4011 /*!< ETM external output 1 */
#define ARM_PMU_TRCEXTOUT2                            0x4012 /*!< ETM external output 2 */
#define ARM_PMU_TRCEXTOUT3                            0x4013 /*!< ETM external output 3 */
#define ARM_PMU_CTI_TRIGOUT4                          0x4018 /*!< Cross-trigger Interface output trigger 4 */
#define ARM_PMU_CTI_TRIGOUT5                          0x4019 /*!< Cross-trigger Interface output trigger 5 */
#define ARM_PMU_CTI_TRIGOUT6                          0x401A /*!< Cross-trigger Interface output trigger 6 */
#define ARM_PMU_CTI_TRIGOUT7                          0x401B /*!< Cross-trigger Interface output trigger 7 */
+
171
+ /** \brief PMU Functions */
172
+
173
+ __STATIC_INLINE void ARM_PMU_Enable(void);
174
+ __STATIC_INLINE void ARM_PMU_Disable(void);
175
+
176
+ __STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type);
177
+
178
+ __STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void);
179
+ __STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void);
180
+
181
+ __STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask);
182
+ __STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask);
183
+
184
+ __STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void);
185
+ __STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num);
186
+
187
+ __STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void);
188
+ __STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask);
189
+
190
+ __STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask);
191
+ __STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask);
192
+
193
+ __STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask);
194
+
195
+ /**
196
+ \brief Enable the PMU
197
+ */
198
+ __STATIC_INLINE void ARM_PMU_Enable(void)
199
+ {
200
+ PMU->CTRL |= PMU_CTRL_ENABLE_Msk;
201
+ }
202
+
203
+ /**
204
+ \brief Disable the PMU
205
+ */
206
+ __STATIC_INLINE void ARM_PMU_Disable(void)
207
+ {
208
+ PMU->CTRL &= ~PMU_CTRL_ENABLE_Msk;
209
+ }
210
+
211
+ /**
212
+ \brief Set event to count for PMU eventer counter
213
+ \param [in] num Event counter (0-30) to configure
214
+ \param [in] type Event to count
215
+ */
216
+ __STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type)
217
+ {
218
+ PMU->EVTYPER[num] = type;
219
+ }
220
+
221
+ /**
222
+ \brief Reset cycle counter
223
+ */
224
+ __STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void)
225
+ {
226
+ PMU->CTRL |= PMU_CTRL_CYCCNT_RESET_Msk;
227
+ }
228
+
229
+ /**
230
+ \brief Reset all event counters
231
+ */
232
+ __STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void)
233
+ {
234
+ PMU->CTRL |= PMU_CTRL_EVENTCNT_RESET_Msk;
235
+ }
236
+
237
+ /**
238
+ \brief Enable counters
239
+ \param [in] mask Counters to enable
240
+ \note Enables one or more of the following:
241
+ - event counters (0-30)
242
+ - cycle counter
243
+ */
244
+ __STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask)
245
+ {
246
+ PMU->CNTENSET = mask;
247
+ }
248
+
249
+ /**
250
+ \brief Disable counters
251
+ \param [in] mask Counters to enable
252
+ \note Disables one or more of the following:
253
+ - event counters (0-30)
254
+ - cycle counter
255
+ */
256
+ __STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask)
257
+ {
258
+ PMU->CNTENCLR = mask;
259
+ }
260
+
261
+ /**
262
+ \brief Read cycle counter
263
+ \return Cycle count
264
+ */
265
+ __STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void)
266
+ {
267
+ return PMU->CCNTR;
268
+ }
269
+
270
+ /**
271
+ \brief Read event counter
272
+ \param [in] num Event counter (0-30) to read
273
+ \return Event count
274
+ */
275
+ __STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num)
276
+ {
277
+ return PMU_EVCNTR_CNT_Msk & PMU->EVCNTR[num];
278
+ }
279
+
280
+ /**
281
+ \brief Read counter overflow status
282
+ \return Counter overflow status bits for the following:
283
+ - event counters (0-30)
284
+ - cycle counter
285
+ */
286
+ __STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void)
287
+ {
288
+ return PMU->OVSSET;
289
+ }
290
+
291
+ /**
292
+ \brief Clear counter overflow status
293
+ \param [in] mask Counter overflow status bits to clear
294
+ \note Clears overflow status bits for one or more of the following:
295
+ - event counters (0-30)
296
+ - cycle counter
297
+ */
298
+ __STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask)
299
+ {
300
+ PMU->OVSCLR = mask;
301
+ }
302
+
303
+ /**
304
+ \brief Enable counter overflow interrupt request
305
+ \param [in] mask Counter overflow interrupt request bits to set
306
+ \note Sets overflow interrupt request bits for one or more of the following:
307
+ - event counters (0-30)
308
+ - cycle counter
309
+ */
310
+ __STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask)
311
+ {
312
+ PMU->INTENSET = mask;
313
+ }
314
+
315
+ /**
316
+ \brief Disable counter overflow interrupt request
317
+ \param [in] mask Counter overflow interrupt request bits to clear
318
+ \note Clears overflow interrupt request bits for one or more of the following:
319
+ - event counters (0-30)
320
+ - cycle counter
321
+ */
322
+ __STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask)
323
+ {
324
+ PMU->INTENCLR = mask;
325
+ }
326
+
327
+ /**
328
+ \brief Software increment event counter
329
+ \param [in] mask Counters to increment
330
+ \note Software increment bits for one or more event counters (0-30)
331
+ */
332
+ __STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask)
333
+ {
334
+ PMU->SWINC = mask;
335
+ }
336
+
337
+ #endif
ei-cpp-export/edge-impulse-sdk/CMSIS/Core/Include/tz_context.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/******************************************************************************
 * @file     tz_context.h
 * @brief    Context Management for Armv8-M TrustZone
 * @version  V1.0.1
 * @date     10. January 2018
 ******************************************************************************/
/*
 * Copyright (c) 2017-2018 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if   defined ( __ICCARM__ )
  #pragma system_include         /* treat file as system include file for MISRA check */
#elif defined (__clang__)
  #pragma clang system_header    /* treat file as system include file */
#endif

#ifndef TZ_CONTEXT_H
#define TZ_CONTEXT_H

#include <stdint.h>

/*
 * API for managing secure-side memory contexts on behalf of a non-secure
 * RTOS. The secure firmware implements these functions; the non-secure
 * RTOS calls them when creating, destroying and switching threads that
 * invoke secure software modules. All functions report 1 on success and
 * 0 on error (allocation additionally returns 0 when no slot is free).
 */

#ifndef TZ_MODULEID_T
#define TZ_MODULEID_T
/**
  Identifies a secure software module called by a process.
*/
typedef uint32_t TZ_ModuleId_t;
#endif

/**
  Identifies an allocated secure context memory slot.
*/
typedef uint32_t TZ_MemoryId_t;

/**
  Initialize the secure context memory system.
  @return  execution status (1: success, 0: error)
*/
uint32_t TZ_InitContextSystem_S (void);

/**
  Allocate context memory for calling secure software modules in TrustZone.
  @param[in]  module  identifies software modules called from non-secure mode
  @return  value != 0: TrustZone memory slot identifier
  @return  value 0:    no memory available or internal error
*/
TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module);

/**
  Free context memory previously allocated with \ref TZ_AllocModuleContext_S.
  @param[in]  id  TrustZone memory slot identifier
  @return  execution status (1: success, 0: error)
*/
uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id);

/**
  Load a secure context (called on RTOS thread context switch).
  @param[in]  id  TrustZone memory slot identifier
  @return  execution status (1: success, 0: error)
*/
uint32_t TZ_LoadContext_S (TZ_MemoryId_t id);

/**
  Store a secure context (called on RTOS thread context switch).
  @param[in]  id  TrustZone memory slot identifier
  @return  execution status (1: success, 0: error)
*/
uint32_t TZ_StoreContext_S (TZ_MemoryId_t id);

#endif  // TZ_CONTEXT_H
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_common_tables.h
4
+ * Description: Extern declaration for common tables
5
+ *
6
+ * $Date: 27. January 2017
7
+ * $Revision: V.1.5.1
8
+ *
9
+ * Target Processor: Cortex-M cores
10
+ * -------------------------------------------------------------------- */
11
+ /*
12
+ * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved.
13
+ *
14
+ * SPDX-License-Identifier: Apache-2.0
15
+ *
16
+ * Licensed under the Apache License, Version 2.0 (the License); you may
17
+ * not use this file except in compliance with the License.
18
+ * You may obtain a copy of the License at
19
+ *
20
+ * www.apache.org/licenses/LICENSE-2.0
21
+ *
22
+ * Unless required by applicable law or agreed to in writing, software
23
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ * See the License for the specific language governing permissions and
26
+ * limitations under the License.
27
+ */
28
+
29
+ #ifndef _ARM_COMMON_TABLES_H
30
+ #define _ARM_COMMON_TABLES_H
31
+
32
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
33
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h"
34
+
35
+ #ifdef __cplusplus
36
+ extern "C"
37
+ {
38
+ #endif
39
+
40
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES)
41
+ /* Double Precision Float CFFT twiddles */
42
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024)
43
+ extern const uint16_t armBitRevTable[1024];
44
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
45
+
46
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_16)
47
+ extern const uint64_t twiddleCoefF64_16[32];
48
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
49
+
50
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_32)
51
+ extern const uint64_t twiddleCoefF64_32[64];
52
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
53
+
54
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_64)
55
+ extern const uint64_t twiddleCoefF64_64[128];
56
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
57
+
58
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_128)
59
+ extern const uint64_t twiddleCoefF64_128[256];
60
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
61
+
62
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_256)
63
+ extern const uint64_t twiddleCoefF64_256[512];
64
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
65
+
66
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_512)
67
+ extern const uint64_t twiddleCoefF64_512[1024];
68
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
69
+
70
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_1024)
71
+ extern const uint64_t twiddleCoefF64_1024[2048];
72
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
73
+
74
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_2048)
75
+ extern const uint64_t twiddleCoefF64_2048[4096];
76
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
77
+
78
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_4096)
79
+ extern const uint64_t twiddleCoefF64_4096[8192];
80
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
81
+
82
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_16)
83
+ extern const float32_t twiddleCoef_16[32];
84
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
85
+
86
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_32)
87
+ extern const float32_t twiddleCoef_32[64];
88
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
89
+
90
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_64)
91
+ extern const float32_t twiddleCoef_64[128];
92
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
93
+
94
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_128)
95
+ extern const float32_t twiddleCoef_128[256];
96
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
97
+
98
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_256)
99
+ extern const float32_t twiddleCoef_256[512];
100
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
101
+
102
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_512)
103
+ extern const float32_t twiddleCoef_512[1024];
104
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
105
+
106
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_1024)
107
+ extern const float32_t twiddleCoef_1024[2048];
108
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
109
+
110
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_2048)
111
+ extern const float32_t twiddleCoef_2048[4096];
112
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
113
+
114
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_4096)
115
+ extern const float32_t twiddleCoef_4096[8192];
116
+ #define twiddleCoef twiddleCoef_4096
117
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
118
+
119
+ /* Q31 */
120
+
121
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_16)
122
+ extern const q31_t twiddleCoef_16_q31[24];
123
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
124
+
125
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_32)
126
+ extern const q31_t twiddleCoef_32_q31[48];
127
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
128
+
129
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_64)
130
+ extern const q31_t twiddleCoef_64_q31[96];
131
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
132
+
133
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_128)
134
+ extern const q31_t twiddleCoef_128_q31[192];
135
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
136
+
137
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_256)
138
+ extern const q31_t twiddleCoef_256_q31[384];
139
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
140
+
141
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_512)
142
+ extern const q31_t twiddleCoef_512_q31[768];
143
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
144
+
145
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_1024)
146
+ extern const q31_t twiddleCoef_1024_q31[1536];
147
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
148
+
149
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_2048)
150
+ extern const q31_t twiddleCoef_2048_q31[3072];
151
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
152
+
153
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_4096)
154
+ extern const q31_t twiddleCoef_4096_q31[6144];
155
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
156
+
157
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_16)
158
+ extern const q15_t twiddleCoef_16_q15[24];
159
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
160
+
161
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_32)
162
+ extern const q15_t twiddleCoef_32_q15[48];
163
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
164
+
165
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_64)
166
+ extern const q15_t twiddleCoef_64_q15[96];
167
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
168
+
169
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_128)
170
+ extern const q15_t twiddleCoef_128_q15[192];
171
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
172
+
173
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_256)
174
+ extern const q15_t twiddleCoef_256_q15[384];
175
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
176
+
177
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_512)
178
+ extern const q15_t twiddleCoef_512_q15[768];
179
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
180
+
181
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_1024)
182
+ extern const q15_t twiddleCoef_1024_q15[1536];
183
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
184
+
185
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_2048)
186
+ extern const q15_t twiddleCoef_2048_q15[3072];
187
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
188
+
189
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_4096)
190
+ extern const q15_t twiddleCoef_4096_q15[6144];
191
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
192
+
193
+ /* Double Precision Float RFFT twiddles */
194
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_32)
195
+ extern const uint64_t twiddleCoefF64_rfft_32[32];
196
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
197
+
198
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_64)
199
+ extern const uint64_t twiddleCoefF64_rfft_64[64];
200
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
201
+
202
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_128)
203
+ extern const uint64_t twiddleCoefF64_rfft_128[128];
204
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
205
+
206
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_256)
207
+ extern const uint64_t twiddleCoefF64_rfft_256[256];
208
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
209
+
210
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_512)
211
+ extern const uint64_t twiddleCoefF64_rfft_512[512];
212
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
213
+
214
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_1024)
215
+ extern const uint64_t twiddleCoefF64_rfft_1024[1024];
216
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
217
+
218
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_2048)
219
+ extern const uint64_t twiddleCoefF64_rfft_2048[2048];
220
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
221
+
222
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_4096)
223
+ extern const uint64_t twiddleCoefF64_rfft_4096[4096];
224
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
225
+
226
+
227
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_32)
228
+ extern const float32_t twiddleCoef_rfft_32[32];
229
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
230
+
231
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_64)
232
+ extern const float32_t twiddleCoef_rfft_64[64];
233
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
234
+
235
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_128)
236
+ extern const float32_t twiddleCoef_rfft_128[128];
237
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
238
+
239
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_256)
240
+ extern const float32_t twiddleCoef_rfft_256[256];
241
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
242
+
243
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_512)
244
+ extern const float32_t twiddleCoef_rfft_512[512];
245
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
246
+
247
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_1024)
248
+ extern const float32_t twiddleCoef_rfft_1024[1024];
249
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
250
+
251
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_2048)
252
+ extern const float32_t twiddleCoef_rfft_2048[2048];
253
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
254
+
255
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_4096)
256
+ extern const float32_t twiddleCoef_rfft_4096[4096];
257
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
258
+
259
+
260
+ /* Double precision floating-point bit reversal tables */
261
+
262
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_16)
263
+ #define ARMBITREVINDEXTABLEF64_16_TABLE_LENGTH ((uint16_t)12)
264
+ extern const uint16_t armBitRevIndexTableF64_16[ARMBITREVINDEXTABLEF64_16_TABLE_LENGTH];
265
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
266
+
267
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_32)
268
+ #define ARMBITREVINDEXTABLEF64_32_TABLE_LENGTH ((uint16_t)24)
269
+ extern const uint16_t armBitRevIndexTableF64_32[ARMBITREVINDEXTABLEF64_32_TABLE_LENGTH];
270
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
271
+
272
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_64)
273
+ #define ARMBITREVINDEXTABLEF64_64_TABLE_LENGTH ((uint16_t)56)
274
+ extern const uint16_t armBitRevIndexTableF64_64[ARMBITREVINDEXTABLEF64_64_TABLE_LENGTH];
275
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
276
+
277
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_128)
278
+ #define ARMBITREVINDEXTABLEF64_128_TABLE_LENGTH ((uint16_t)112)
279
+ extern const uint16_t armBitRevIndexTableF64_128[ARMBITREVINDEXTABLEF64_128_TABLE_LENGTH];
280
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
281
+
282
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_256)
283
+ #define ARMBITREVINDEXTABLEF64_256_TABLE_LENGTH ((uint16_t)240)
284
+ extern const uint16_t armBitRevIndexTableF64_256[ARMBITREVINDEXTABLEF64_256_TABLE_LENGTH];
285
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
286
+
287
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_512)
288
+ #define ARMBITREVINDEXTABLEF64_512_TABLE_LENGTH ((uint16_t)480)
289
+ extern const uint16_t armBitRevIndexTableF64_512[ARMBITREVINDEXTABLEF64_512_TABLE_LENGTH];
290
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
291
+
292
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_1024)
293
+ #define ARMBITREVINDEXTABLEF64_1024_TABLE_LENGTH ((uint16_t)992)
294
+ extern const uint16_t armBitRevIndexTableF64_1024[ARMBITREVINDEXTABLEF64_1024_TABLE_LENGTH];
295
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
296
+
297
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_2048)
298
+ #define ARMBITREVINDEXTABLEF64_2048_TABLE_LENGTH ((uint16_t)1984)
299
+ extern const uint16_t armBitRevIndexTableF64_2048[ARMBITREVINDEXTABLEF64_2048_TABLE_LENGTH];
300
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
301
+
302
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_4096)
303
+ #define ARMBITREVINDEXTABLEF64_4096_TABLE_LENGTH ((uint16_t)4032)
304
+ extern const uint16_t armBitRevIndexTableF64_4096[ARMBITREVINDEXTABLEF64_4096_TABLE_LENGTH];
305
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
306
+ /* floating-point bit reversal tables */
307
+
308
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_16)
309
+ #define ARMBITREVINDEXTABLE_16_TABLE_LENGTH ((uint16_t)20)
310
+ extern const uint16_t armBitRevIndexTable16[ARMBITREVINDEXTABLE_16_TABLE_LENGTH];
311
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
312
+
313
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_32)
314
+ #define ARMBITREVINDEXTABLE_32_TABLE_LENGTH ((uint16_t)48)
315
+ extern const uint16_t armBitRevIndexTable32[ARMBITREVINDEXTABLE_32_TABLE_LENGTH];
316
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
317
+
318
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_64)
319
+ #define ARMBITREVINDEXTABLE_64_TABLE_LENGTH ((uint16_t)56)
320
+ extern const uint16_t armBitRevIndexTable64[ARMBITREVINDEXTABLE_64_TABLE_LENGTH];
321
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
322
+
323
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_128)
324
+ #define ARMBITREVINDEXTABLE_128_TABLE_LENGTH ((uint16_t)208)
325
+ extern const uint16_t armBitRevIndexTable128[ARMBITREVINDEXTABLE_128_TABLE_LENGTH];
326
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
327
+
328
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_256)
329
+ #define ARMBITREVINDEXTABLE_256_TABLE_LENGTH ((uint16_t)440)
330
+ extern const uint16_t armBitRevIndexTable256[ARMBITREVINDEXTABLE_256_TABLE_LENGTH];
331
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
332
+
333
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_512)
334
+ #define ARMBITREVINDEXTABLE_512_TABLE_LENGTH ((uint16_t)448)
335
+ extern const uint16_t armBitRevIndexTable512[ARMBITREVINDEXTABLE_512_TABLE_LENGTH];
336
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
337
+
338
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_1024)
339
+ #define ARMBITREVINDEXTABLE_1024_TABLE_LENGTH ((uint16_t)1800)
340
+ extern const uint16_t armBitRevIndexTable1024[ARMBITREVINDEXTABLE_1024_TABLE_LENGTH];
341
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
342
+
343
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_2048)
344
+ #define ARMBITREVINDEXTABLE_2048_TABLE_LENGTH ((uint16_t)3808)
345
+ extern const uint16_t armBitRevIndexTable2048[ARMBITREVINDEXTABLE_2048_TABLE_LENGTH];
346
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
347
+
348
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_4096)
349
+ #define ARMBITREVINDEXTABLE_4096_TABLE_LENGTH ((uint16_t)4032)
350
+ extern const uint16_t armBitRevIndexTable4096[ARMBITREVINDEXTABLE_4096_TABLE_LENGTH];
351
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
352
+
353
+
354
+ /* fixed-point bit reversal tables */
355
+
356
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_16)
357
+ #define ARMBITREVINDEXTABLE_FIXED_16_TABLE_LENGTH ((uint16_t)12)
358
+ extern const uint16_t armBitRevIndexTable_fixed_16[ARMBITREVINDEXTABLE_FIXED_16_TABLE_LENGTH];
359
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
360
+
361
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_32)
362
+ #define ARMBITREVINDEXTABLE_FIXED_32_TABLE_LENGTH ((uint16_t)24)
363
+ extern const uint16_t armBitRevIndexTable_fixed_32[ARMBITREVINDEXTABLE_FIXED_32_TABLE_LENGTH];
364
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
365
+
366
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_64)
367
+ #define ARMBITREVINDEXTABLE_FIXED_64_TABLE_LENGTH ((uint16_t)56)
368
+ extern const uint16_t armBitRevIndexTable_fixed_64[ARMBITREVINDEXTABLE_FIXED_64_TABLE_LENGTH];
369
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
370
+
371
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_128)
372
+ #define ARMBITREVINDEXTABLE_FIXED_128_TABLE_LENGTH ((uint16_t)112)
373
+ extern const uint16_t armBitRevIndexTable_fixed_128[ARMBITREVINDEXTABLE_FIXED_128_TABLE_LENGTH];
374
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
375
+
376
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_256)
377
+ #define ARMBITREVINDEXTABLE_FIXED_256_TABLE_LENGTH ((uint16_t)240)
378
+ extern const uint16_t armBitRevIndexTable_fixed_256[ARMBITREVINDEXTABLE_FIXED_256_TABLE_LENGTH];
379
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
380
+
381
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_512)
382
+ #define ARMBITREVINDEXTABLE_FIXED_512_TABLE_LENGTH ((uint16_t)480)
383
+ extern const uint16_t armBitRevIndexTable_fixed_512[ARMBITREVINDEXTABLE_FIXED_512_TABLE_LENGTH];
384
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
385
+
386
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_1024)
387
+ #define ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH ((uint16_t)992)
388
+ extern const uint16_t armBitRevIndexTable_fixed_1024[ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH];
389
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
390
+
391
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_2048)
392
+ #define ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH ((uint16_t)1984)
393
+ extern const uint16_t armBitRevIndexTable_fixed_2048[ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH];
394
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
395
+
396
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096)
397
+ #define ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH ((uint16_t)4032)
398
+ extern const uint16_t armBitRevIndexTable_fixed_4096[ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH];
399
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
400
+
401
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_F32)
402
+ extern const float32_t realCoefA[8192];
403
+ extern const float32_t realCoefB[8192];
404
+ #endif
405
+
406
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_Q31)
407
+ extern const q31_t realCoefAQ31[8192];
408
+ extern const q31_t realCoefBQ31[8192];
409
+ #endif
410
+
411
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_Q15)
412
+ extern const q15_t realCoefAQ15[8192];
413
+ extern const q15_t realCoefBQ15[8192];
414
+ #endif
415
+
416
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_128)
417
+ extern const float32_t Weights_128[256];
418
+ extern const float32_t cos_factors_128[128];
419
+ #endif
420
+
421
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_512)
422
+ extern const float32_t Weights_512[1024];
423
+ extern const float32_t cos_factors_512[512];
424
+ #endif
425
+
426
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_2048)
427
+ extern const float32_t Weights_2048[4096];
428
+ extern const float32_t cos_factors_2048[2048];
429
+ #endif
430
+
431
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_8192)
432
+ extern const float32_t Weights_8192[16384];
433
+ extern const float32_t cos_factors_8192[8192];
434
+ #endif
435
+
436
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_128)
437
+ extern const q15_t WeightsQ15_128[256];
438
+ extern const q15_t cos_factorsQ15_128[128];
439
+ #endif
440
+
441
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_512)
442
+ extern const q15_t WeightsQ15_512[1024];
443
+ extern const q15_t cos_factorsQ15_512[512];
444
+ #endif
445
+
446
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_2048)
447
+ extern const q15_t WeightsQ15_2048[4096];
448
+ extern const q15_t cos_factorsQ15_2048[2048];
449
+ #endif
450
+
451
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_8192)
452
+ extern const q15_t WeightsQ15_8192[16384];
453
+ extern const q15_t cos_factorsQ15_8192[8192];
454
+ #endif
455
+
456
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_128)
457
+ extern const q31_t WeightsQ31_128[256];
458
+ extern const q31_t cos_factorsQ31_128[128];
459
+ #endif
460
+
461
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_512)
462
+ extern const q31_t WeightsQ31_512[1024];
463
+ extern const q31_t cos_factorsQ31_512[512];
464
+ #endif
465
+
466
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_2048)
467
+ extern const q31_t WeightsQ31_2048[4096];
468
+ extern const q31_t cos_factorsQ31_2048[2048];
469
+ #endif
470
+
471
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_8192)
472
+ extern const q31_t WeightsQ31_8192[16384];
473
+ extern const q31_t cos_factorsQ31_8192[8192];
474
+ #endif
475
+
476
+ #endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_TABLES) */
477
+
478
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_ALLOW_TABLES)
479
+
480
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_RECIP_Q15)
481
+ extern const q15_t armRecipTableQ15[64];
482
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
483
+
484
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_RECIP_Q31)
485
+ extern const q31_t armRecipTableQ31[64];
486
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
487
+
488
+ /* Tables for Fast Math Sine and Cosine */
489
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_F32)
490
+ extern const float32_t sinTable_f32[FAST_MATH_TABLE_SIZE + 1];
491
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
492
+
493
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_Q31)
494
+ extern const q31_t sinTable_q31[FAST_MATH_TABLE_SIZE + 1];
495
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
496
+
497
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_Q15)
498
+ extern const q15_t sinTable_q15[FAST_MATH_TABLE_SIZE + 1];
499
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
500
+
501
+ #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
502
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
503
+ extern const q31_t sqrtTable_Q31[256];
504
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
505
+ #endif
506
+
507
+ #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
508
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
509
+ extern const q15_t sqrtTable_Q15[256];
510
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */
511
+ #endif
512
+
513
+ #endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_TABLES) */
514
+
515
+ #if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
516
+ extern const float32_t exp_tab[8];
517
+ extern const float32_t __logf_lut_f32[8];
518
+ #endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) */
519
+
520
+ #if (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
521
+ extern const unsigned char hwLUT[256];
522
+ #endif /* (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) */
523
+
524
+ #ifdef __cplusplus
525
+ }
526
+ #endif
527
+
528
+ #endif /* ARM_COMMON_TABLES_H */
529
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables_f16.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_common_tables_f16.h
4
+ * Description: Extern declaration for common tables
5
+ *
6
+ * $Date: 27. January 2017
7
+ * $Revision: V.1.5.1
8
+ *
9
+ * Target Processor: Cortex-M cores
10
+ * -------------------------------------------------------------------- */
11
+ /*
12
+ * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved.
13
+ *
14
+ * SPDX-License-Identifier: Apache-2.0
15
+ *
16
+ * Licensed under the Apache License, Version 2.0 (the License); you may
17
+ * not use this file except in compliance with the License.
18
+ * You may obtain a copy of the License at
19
+ *
20
+ * www.apache.org/licenses/LICENSE-2.0
21
+ *
22
+ * Unless required by applicable law or agreed to in writing, software
23
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ * See the License for the specific language governing permissions and
26
+ * limitations under the License.
27
+ */
28
+
29
+ #ifndef _ARM_COMMON_TABLES_F16_H
30
+ #define _ARM_COMMON_TABLES_F16_H
31
+
32
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h"
33
+
34
+ #ifdef __cplusplus
35
+ extern "C"
36
+ {
37
+ #endif
38
+
39
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES)
40
+
41
+ /* F16 */
42
+ #if !defined(__CC_ARM) && defined(ARM_FLOAT16_SUPPORTED)
43
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_16)
44
+ extern const float16_t twiddleCoefF16_16[32];
45
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
46
+
47
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_32)
48
+ extern const float16_t twiddleCoefF16_32[64];
49
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
50
+
51
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_64)
52
+ extern const float16_t twiddleCoefF16_64[128];
53
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
54
+
55
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_128)
56
+ extern const float16_t twiddleCoefF16_128[256];
57
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
58
+
59
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_256)
60
+ extern const float16_t twiddleCoefF16_256[512];
61
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
62
+
63
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_512)
64
+ extern const float16_t twiddleCoefF16_512[1024];
65
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
66
+
67
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_1024)
68
+ extern const float16_t twiddleCoefF16_1024[2048];
69
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
70
+
71
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_2048)
72
+ extern const float16_t twiddleCoefF16_2048[4096];
73
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
74
+
75
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_4096)
76
+ extern const float16_t twiddleCoefF16_4096[8192];
77
+ #define twiddleCoefF16 twiddleCoefF16_4096
78
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */
79
+
80
+
81
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_32)
82
+ extern const float16_t twiddleCoefF16_rfft_32[32];
83
+ #endif
84
+
85
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_64)
86
+ extern const float16_t twiddleCoefF16_rfft_64[64];
87
+ #endif
88
+
89
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_128)
90
+ extern const float16_t twiddleCoefF16_rfft_128[128];
91
+ #endif
92
+
93
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_256)
94
+ extern const float16_t twiddleCoefF16_rfft_256[256];
95
+ #endif
96
+
97
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_512)
98
+ extern const float16_t twiddleCoefF16_rfft_512[512];
99
+ #endif
100
+
101
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_1024)
102
+ extern const float16_t twiddleCoefF16_rfft_1024[1024];
103
+ #endif
104
+
105
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_2048)
106
+ extern const float16_t twiddleCoefF16_rfft_2048[2048];
107
+ #endif
108
+
109
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F16_4096)
110
+ extern const float16_t twiddleCoefF16_rfft_4096[4096];
111
+ #endif
112
+
113
+ #endif /* ARMAC5 */
114
+
115
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */
116
+
117
+ #if !defined(__CC_ARM) && defined(ARM_FLOAT16_SUPPORTED)
118
+
119
+ #if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
120
+ extern const float16_t exp_tab_f16[8];
121
+ extern const float16_t __logf_lut_f16[8];
122
+ #endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) */
123
+ #endif
124
+
125
+
126
+ #ifdef __cplusplus
127
+ }
128
+ #endif
129
+
130
+ #endif /* _ARM_COMMON_TABLES_F16_H */
131
+
132
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_const_structs.h
4
+ * Description: Constant structs that are initialized for user convenience.
5
+ * For example, some can be given as arguments to the arm_cfft_f32() function.
6
+ *
7
+ * $Date: 27. January 2017
8
+ * $Revision: V.1.5.1
9
+ *
10
+ * Target Processor: Cortex-M cores
11
+ * -------------------------------------------------------------------- */
12
+ /*
13
+ * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved.
14
+ *
15
+ * SPDX-License-Identifier: Apache-2.0
16
+ *
17
+ * Licensed under the Apache License, Version 2.0 (the License); you may
18
+ * not use this file except in compliance with the License.
19
+ * You may obtain a copy of the License at
20
+ *
21
+ * www.apache.org/licenses/LICENSE-2.0
22
+ *
23
+ * Unless required by applicable law or agreed to in writing, software
24
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
25
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ * See the License for the specific language governing permissions and
27
+ * limitations under the License.
28
+ */
29
+
30
+ #ifndef _ARM_CONST_STRUCTS_H
31
+ #define _ARM_CONST_STRUCTS_H
32
+
33
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
34
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h"
35
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h"
36
+
37
+ #ifdef __cplusplus
38
+ extern "C"
39
+ {
40
+ #endif
41
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len16;
42
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len32;
43
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len64;
44
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len128;
45
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len256;
46
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len512;
47
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len1024;
48
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len2048;
49
+ extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len4096;
50
+
51
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len16;
52
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len32;
53
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len64;
54
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len128;
55
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len256;
56
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len512;
57
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len1024;
58
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len2048;
59
+ extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len4096;
60
+
61
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len16;
62
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len32;
63
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len64;
64
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len128;
65
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len256;
66
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len512;
67
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len1024;
68
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len2048;
69
+ extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len4096;
70
+
71
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len16;
72
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len32;
73
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len64;
74
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len128;
75
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len256;
76
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len512;
77
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len1024;
78
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len2048;
79
+ extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len4096;
80
+
81
+ #ifdef __cplusplus
82
+ }
83
+ #endif
84
+
85
+ #endif
86
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs_f16.h ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_const_structs_f16.h
4
+ * Description: Constant structs that are initialized for user convenience.
5
+ * For example, some can be given as arguments to the arm_cfft_f16() function.
6
+ *
7
+ * $Date: 20. April 2020
8
+ * $Revision: V.1.5.1
9
+ *
10
+ * Target Processor: Cortex-M cores
11
+ * -------------------------------------------------------------------- */
12
+ /*
13
+ * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved.
14
+ *
15
+ * SPDX-License-Identifier: Apache-2.0
16
+ *
17
+ * Licensed under the Apache License, Version 2.0 (the License); you may
18
+ * not use this file except in compliance with the License.
19
+ * You may obtain a copy of the License at
20
+ *
21
+ * www.apache.org/licenses/LICENSE-2.0
22
+ *
23
+ * Unless required by applicable law or agreed to in writing, software
24
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
25
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ * See the License for the specific language governing permissions and
27
+ * limitations under the License.
28
+ */
29
+
30
+ #ifndef _ARM_CONST_STRUCTS_F16_H
31
+ #define _ARM_CONST_STRUCTS_F16_H
32
+
33
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h"
34
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h"
35
+ #include "arm_common_tables_f16.h"
36
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h"
37
+
38
+ #ifdef __cplusplus
39
+ extern "C"
40
+ {
41
+ #endif
42
+
43
+ #if !defined(__CC_ARM) && defined(ARM_FLOAT16_SUPPORTED)
44
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_16) && defined(ARM_TABLE_BITREVIDX_FLT_16))
45
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len16;
46
+ #endif
47
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_32) && defined(ARM_TABLE_BITREVIDX_FLT_32))
48
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len32;
49
+ #endif
50
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_64) && defined(ARM_TABLE_BITREVIDX_FLT_64))
51
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len64;
52
+ #endif
53
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_128) && defined(ARM_TABLE_BITREVIDX_FLT_128))
54
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len128;
55
+ #endif
56
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_256) && defined(ARM_TABLE_BITREVIDX_FLT_256))
57
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len256;
58
+ #endif
59
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_512) && defined(ARM_TABLE_BITREVIDX_FLT_512))
60
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len512;
61
+ #endif
62
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_1024) && defined(ARM_TABLE_BITREVIDX_FLT_1024))
63
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len1024;
64
+ #endif
65
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_2048) && defined(ARM_TABLE_BITREVIDX_FLT_2048))
66
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len2048;
67
+ #endif
68
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F16_4096) && defined(ARM_TABLE_BITREVIDX_FLT_4096))
69
+ extern const arm_cfft_instance_f16 arm_cfft_sR_f16_len4096;
70
+ #endif
71
+ #endif
72
+
73
+ #ifdef __cplusplus
74
+ }
75
+ #endif
76
+
77
+ #endif
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h ADDED
@@ -0,0 +1,753 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_helium_utils.h
4
+ * Description: Utility functions for Helium development
5
+ *
6
+ * $Date: 09. September 2019
7
+ * $Revision: V.1.5.1
8
+ *
9
+ * Target Processor: Cortex-M cores
10
+ * -------------------------------------------------------------------- */
11
+ /*
12
+ * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
13
+ *
14
+ * SPDX-License-Identifier: Apache-2.0
15
+ *
16
+ * Licensed under the Apache License, Version 2.0 (the License); you may
17
+ * not use this file except in compliance with the License.
18
+ * You may obtain a copy of the License at
19
+ *
20
+ * www.apache.org/licenses/LICENSE-2.0
21
+ *
22
+ * Unless required by applicable law or agreed to in writing, software
23
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ * See the License for the specific language governing permissions and
26
+ * limitations under the License.
27
+ */
28
+
29
+ #ifndef _ARM_UTILS_HELIUM_H_
30
+ #define _ARM_UTILS_HELIUM_H_
31
+
32
+
33
+ #ifdef __cplusplus
34
+ extern "C"
35
+ {
36
+ #endif
37
+ /***************************************
38
+
39
+ Definitions available for MVEF and MVEI
40
+
41
+ ***************************************/
42
+ #if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)
43
+
44
+ #define INACTIVELANE 0 /* inactive lane content */
45
+
46
+
47
+ #endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */
48
+
49
+ /***************************************
50
+
51
+ Definitions available for MVEF only
52
+
53
+ ***************************************/
54
+ #if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE)
55
+
56
+ __STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in)
57
+ {
58
+ float32_t acc;
59
+
60
+ acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) +
61
+ vgetq_lane(in, 2) + vgetq_lane(in, 3);
62
+
63
+ return acc;
64
+ }
65
+
66
+
67
+
68
+
69
+ /* newton initial guess */
70
+ #define INVSQRT_MAGIC_F32 0x5f3759df
71
+ #define INV_NEWTON_INIT_F32 0x7EF127EA
72
+
73
+
74
+ #define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)\
75
+ { \
76
+ float32x4_t tmp; \
77
+ \
78
+ /* tmp = xhalf * x * x */ \
79
+ tmp = vmulq(xStart, xStart); \
80
+ tmp = vmulq(tmp, xHalf); \
81
+ /* (1.5f - xhalf * x * x) */ \
82
+ tmp = vsubq(vdupq_n_f32(1.5f), tmp); \
83
+ /* x = x*(1.5f-xhalf*x*x); */ \
84
+ invSqrt = vmulq(tmp, xStart); \
85
+ }
86
+ #endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */
87
+
88
+
89
+ /***************************************
90
+
91
+ Definitions available for f16 datatype with HW acceleration only
92
+
93
+ ***************************************/
94
+ #if defined(ARM_FLOAT16_SUPPORTED)
95
+ #if defined (ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
96
+
97
+ __STATIC_FORCEINLINE float16_t vecAddAcrossF16Mve(float16x8_t in)
98
+ {
99
+ float16x8_t tmpVec;
100
+ _Float16 acc;
101
+
102
+ tmpVec = (float16x8_t) vrev32q_s16((int16x8_t) in);
103
+ in = vaddq_f16(tmpVec, in);
104
+ tmpVec = (float16x8_t) vrev64q_s32((int32x4_t) in);
105
+ in = vaddq_f16(tmpVec, in);
106
+ acc = (_Float16)vgetq_lane_f16(in, 0) + (_Float16)vgetq_lane_f16(in, 4);
107
+
108
+ return acc;
109
+ }
110
+
111
+ __STATIC_FORCEINLINE float16x8_t __mve_cmplx_sum_intra_vec_f16(
112
+ float16x8_t vecIn)
113
+ {
114
+ float16x8_t vecTmp, vecOut;
115
+ uint32_t tmp;
116
+
117
+ vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn);
118
+ // TO TRACK : using canonical addition leads to unefficient code generation for f16
119
+ // vecTmp = vecTmp + vecAccCpx0;
120
+ /*
121
+ * Compute
122
+ * re0+re1 | im0+im1 | re0+re1 | im0+im1
123
+ * re2+re3 | im2+im3 | re2+re3 | im2+im3
124
+ */
125
+ vecTmp = vaddq_f16(vecTmp, vecIn);
126
+ vecOut = vecTmp;
127
+ /*
128
+ * shift left, random tmp insertion in bottom
129
+ */
130
+ vecOut = vreinterpretq_f16_s32(vshlcq_s32(vreinterpretq_s32_f16(vecOut) , &tmp, 32));
131
+ /*
132
+ * Compute:
133
+ * DONTCARE | DONTCARE | re0+re1+re0+re1 |im0+im1+im0+im1
134
+ * re0+re1+re2+re3 | im0+im1+im2+im3 | re2+re3+re2+re3 |im2+im3+im2+im3
135
+ */
136
+ vecOut = vaddq_f16(vecOut, vecTmp);
137
+ /*
138
+ * Cmplx sum is in 4rd & 5th f16 elt
139
+ * return full vector
140
+ */
141
+ return vecOut;
142
+ }
143
+
144
+
145
+ #define mve_cmplx_sum_intra_r_i_f16(vec, Re, Im) \
146
+ { \
147
+ float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vec); \
148
+ Re = vgetq_lane(vecOut, 4); \
149
+ Im = vgetq_lane(vecOut, 5); \
150
+ }
151
+
152
/* Store the intra-vector complex sum of 'vecIn' to 'pOut' as an interleaved
 * (re, im) f16 pair, using a single 32-bit store of lanes 4+5. */
__STATIC_FORCEINLINE void mve_cmplx_sum_intra_vec_f16(
    float16x8_t   vecIn,
    float16_t  *pOut)
{
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vecIn);
    /*
     * Cmplx sum is in 4th & 5th f16 elt
     * use 32-bit extraction
     */
    *(float32_t *) pOut = ((float32x4_t) vecOut)[2];
}
163
+
164
+
165
/* Magic seed constant for the f16 fast inverse-square-root initial
 * estimate (Quake-style bit trick, adapted to the f16 bit layout). */
#define INVSQRT_MAGIC_F16           0x59ba      /*  ( 0x1ba = 0x3759df >> 13) */

/* One Newton-Raphson refinement step for 1/sqrt():
 *   invSqrt = xStart * (1.5 - xHalf * xStart * xStart)
 * where xHalf is half the original input and xStart the current estimate.
 * canonical version of INVSQRT_NEWTON_MVE_F16 leads to bad performance */
#define INVSQRT_NEWTON_MVE_F16(invSqrt, xHalf, xStart)          \
{                                                               \
    float16x8_t tmp;                                            \
                                                                \
    /* tmp = xhalf * x * x */                                   \
    tmp = vmulq(xStart, xStart);                                \
    tmp = vmulq(tmp, xHalf);                                    \
    /* (1.5f - xhalf * x * x) */                                \
    tmp = vsubq(vdupq_n_f16((float16_t)1.5), tmp);              \
    /* x = x*(1.5f-xhalf*x*x); */                               \
    invSqrt = vmulq(tmp, xStart);                               \
}
180
+
181
+ #endif
182
+ #endif
183
+
184
+ /***************************************
185
+
186
+ Definitions available for MVEI and MVEF only
187
+
188
+ ***************************************/
189
+ #if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)
190
+ /* Following functions are used to transpose matrix in f32 and q31 cases */
191
/* Transpose a 2x2 matrix of 32-bit elements: one contiguous vector load
 * followed by a single scatter store with swapped off-diagonal offsets.
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    static const uint32x4_t vecOffs = { 0, 2, 1, 3 };
    /*
     *
     * | 0   1 |   =>  |  0   2 |
     * | 2   3 |       |  1   3 |
     *
     */
    uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn);

    return (ARM_MATH_SUCCESS);
}
207
+
208
/* Transpose a 3x3 matrix of 32-bit elements: two 4-element scatter stores
 * cover the first 8 elements; the last element (index 8, which stays in
 * place under transposition) is copied with a scalar move.
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
    const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  4 x 32 flattened version | 0  3  6  1 |
     *  | 3   4   5 |  =>   | 1   4   7 |        =>                 | 4  7  2  5 |
     *  | 6   7   8 |       | 2   5   8 |      (row major)          | 8  .  .  . |
     *
     */
    uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc);
    uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}
231
+
232
/* Transpose a 4x4 matrix of 32-bit elements via a 4-way de-interleaving
 * load (vld4q) followed by four contiguous row stores.
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest)
{
    /*
     * 4x4 Matrix transposition
     * is 4 x de-interleave operation
     *
     * 0   1   2   3         0   4   8   12
     * 4   5   6   7         1   5   9   13
     * 8   9   10  11        2   6   10  14
     * 12  13  14  15        3   7   11  15
     */

    uint32x4x4_t vecIn;

    vecIn = vld4q((uint32_t const *) pDataSrc);
    vstrwq(pDataDest, vecIn.val[0]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[1]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[2]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[3]);

    return (ARM_MATH_SUCCESS);
}
257
+
258
+
259
/* Generic transpose of a srcRows x srcCols matrix of 32-bit elements.
 * Walks the source column by column: gathers one column (4 rows at a time,
 * remainder handled by tail predication) and stores it contiguously as a
 * destination row.
 * NOTE: srcCols must be non-zero (do/while loop has no entry guard).
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t  * pDataSrc,
    uint32_t  * pDataDest)
{
    uint32x4_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint32_t const *pDataC;
    uint32_t       *pDataDestR;
    uint32x4_t      vecIn;

    /* Gather offsets {0, 1, 2, 3} * srcCols: 4 vertically adjacent elements. */
    vecOffs = vidupq_u32((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            pDataC = pDataC + srcCols * 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /*
         * tail (predicated: only the remaining srcRows % 4 lanes are stored)
         */
        blkCnt = srcRows & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        /* next source column / next destination row */
        pDataSrc += 1;
        pDataDest += srcRows;
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
312
+
313
+ __STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit(
314
+ uint16_t srcRows,
315
+ uint16_t srcCols,
316
+ uint32_t *pDataSrc,
317
+ uint16_t dstRows,
318
+ uint16_t dstCols,
319
+ uint32_t *pDataDest)
320
+ {
321
+ uint32_t i;
322
+ uint32_t const *pDataC;
323
+ uint32_t *pDataRow;
324
+ uint32_t *pDataDestR, *pDataDestRow;
325
+ uint32x4_t vecOffsRef, vecOffsCur;
326
+ uint32_t blkCnt;
327
+ uint32x4_t vecIn;
328
+
329
+ #ifdef ARM_MATH_MATRIX_CHECK
330
+ /*
331
+ * Check for matrix mismatch condition
332
+ */
333
+ if ((srcRows != dstCols) || (srcCols != dstRows))
334
+ {
335
+ /*
336
+ * Set status as ARM_MATH_SIZE_MISMATCH
337
+ */
338
+ return = ARM_MATH_SIZE_MISMATCH;
339
+ }
340
+ #else
341
+ (void)dstRows;
342
+ (void)dstCols;
343
+ #endif
344
+
345
+ /* 2x2, 3x3 and 4x4 specialization to be added */
346
+
347
+ vecOffsRef[0] = 0;
348
+ vecOffsRef[1] = 1;
349
+ vecOffsRef[2] = srcCols << 1;
350
+ vecOffsRef[3] = (srcCols << 1) + 1;
351
+
352
+ pDataRow = pDataSrc;
353
+ pDataDestRow = pDataDest;
354
+ i = srcCols;
355
+ do
356
+ {
357
+ pDataC = (uint32_t const *) pDataRow;
358
+ pDataDestR = pDataDestRow;
359
+ vecOffsCur = vecOffsRef;
360
+
361
+ blkCnt = (srcRows * CMPLX_DIM) >> 2;
362
+ while (blkCnt > 0U)
363
+ {
364
+ vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
365
+ vstrwq(pDataDestR, vecIn);
366
+ pDataDestR += 4;
367
+ vecOffsCur = vaddq(vecOffsCur, (srcCols << 2));
368
+ /*
369
+ * Decrement the blockSize loop counter
370
+ */
371
+ blkCnt--;
372
+ }
373
+ /*
374
+ * tail
375
+ * (will be merged thru tail predication)
376
+ */
377
+ blkCnt = (srcRows * CMPLX_DIM) & 3;
378
+ if (blkCnt > 0U)
379
+ {
380
+ mve_pred16_t p0 = vctp32q(blkCnt);
381
+ vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
382
+ vstrwq_p(pDataDestR, vecIn, p0);
383
+ }
384
+
385
+ pDataRow += CMPLX_DIM;
386
+ pDataDestRow += (srcRows * CMPLX_DIM);
387
+ }
388
+ while (--i);
389
+
390
+ return (ARM_MATH_SUCCESS);
391
+ }
392
+
393
+ __STATIC_INLINE arm_status arm_mat_trans_16bit_2x2(uint16_t * pDataSrc, uint16_t * pDataDest)
394
+ {
395
+ pDataDest[0] = pDataSrc[0];
396
+ pDataDest[3] = pDataSrc[3];
397
+ pDataDest[2] = pDataSrc[1];
398
+ pDataDest[1] = pDataSrc[2];
399
+
400
+ return (ARM_MATH_SUCCESS);
401
+ }
402
+
403
/* Transpose a 3x3 matrix of 16-bit elements: one 8-element scatter store
 * covers the first 8 elements; the last element (index 8, invariant under
 * transposition) is copied with a scalar move.
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_16bit_3x3_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr33[8] = { 0, 3, 6, 1, 4, 7, 2, 5 };
    uint16x8_t vecOffs1;
    uint16x8_t vecIn1;
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  8 x 16 flattened version | 0  3  6  1  4  7  2  5 |
     *  | 3   4   5 |  =>   | 1   4   7 |        =>                 | 8  .  .  .  .  .  .  . |
     *  | 6   7   8 |       | 2   5   8 |      (row major)
     *
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr33);
    vecIn1 = vldrhq_u16((uint16_t const *) pDataSrc);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}
424
+
425
+
426
/* Transpose a 4x4 matrix of 16-bit elements: two contiguous 8-element
 * loads followed by two scatter stores with transposed offsets.
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_16bit_4x4_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr44_1[8] = { 0, 4, 8, 12, 1, 5, 9, 13 };
    static const uint16_t stridesTr44_2[8] = { 2, 6, 10, 14, 3, 7, 11, 15 };
    uint16x8_t vecOffs1, vecOffs2;
    uint16x8_t vecIn1, vecIn2;
    uint16_t const * pDataSrcVec = (uint16_t const *) pDataSrc;

    /*
     * 4x4 Matrix transposition
     *
     *  | 0   1   2   3  |       | 0   4   8   12 |   8 x 16 flattened version
     *  | 4   5   6   7  |  =>   | 1   5   9   13 |   =>  [0  4  8  12  1  5  9  13]
     *  | 8   9   10  11 |       | 2   6   10  14 |       [2  6  10  14  3  7  11  15]
     *  | 12  13  14  15 |       | 3   7   11  15 |
     */

    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr44_1);
    vecOffs2 = vldrhq_u16((uint16_t const *) stridesTr44_2);
    vecIn1 = vldrhq_u16(pDataSrcVec);
    pDataSrcVec += 8;
    vecIn2 = vldrhq_u16(pDataSrcVec);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);
    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs2, vecIn2);


    return (ARM_MATH_SUCCESS);
}
455
+
456
+
457
+
458
/* Generic transpose of a srcRows x srcCols matrix of 16-bit elements.
 * Walks the source column by column: gathers one column (8 rows at a time,
 * remainder handled by tail predication) and stores it contiguously as a
 * destination row.
 * Always returns ARM_MATH_SUCCESS. */
__STATIC_INLINE arm_status arm_mat_trans_16bit_generic(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t  * pDataSrc,
    uint16_t  * pDataDest)
{
    uint16x8_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint16_t const *pDataC;
    uint16_t       *pDataDestR;
    uint16x8_t      vecIn;

    /* Gather offsets {0..7} * srcCols: 8 vertically adjacent elements. */
    vecOffs = vidupq_u16((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    while(i > 0U)
    {
        pDataC = (uint16_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_u16(pDataDestR, vecIn);
            pDataDestR += 8;
            pDataC = pDataC + srcCols * 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /*
         * tail (predicated: only the remaining srcRows % 8 lanes are stored)
         */
        blkCnt = srcRows & 7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_p_u16(pDataDestR, vecIn, p0);
        }
        /* next source column / next destination row */
        pDataSrc += 1;
        pDataDest += srcRows;
        i--;
    }

    return (ARM_MATH_SUCCESS);
}
510
+
511
+
512
+ __STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit(
513
+ uint16_t srcRows,
514
+ uint16_t srcCols,
515
+ uint16_t *pDataSrc,
516
+ uint16_t dstRows,
517
+ uint16_t dstCols,
518
+ uint16_t *pDataDest)
519
+ {
520
+ static const uint16_t loadCmplxCol[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
521
+ int i;
522
+ uint16x8_t vecOffsRef, vecOffsCur;
523
+ uint16_t const *pDataC;
524
+ uint16_t *pDataRow;
525
+ uint16_t *pDataDestR, *pDataDestRow;
526
+ uint32_t blkCnt;
527
+ uint16x8_t vecIn;
528
+
529
+ #ifdef ARM_MATH_MATRIX_CHECK
530
+ /*
531
+ * Check for matrix mismatch condition
532
+ */
533
+ if ((srcRows != dstCols) || (srcCols != dstRows))
534
+ {
535
+ /*
536
+ * Set status as ARM_MATH_SIZE_MISMATCH
537
+ */
538
+ return = ARM_MATH_SIZE_MISMATCH;
539
+ }
540
+ #else
541
+ (void)dstRows;
542
+ (void)dstCols;
543
+ #endif
544
+
545
+ /*
546
+ * 2x2, 3x3 and 4x4 specialization to be added
547
+ */
548
+
549
+
550
+ /*
551
+ * build [0, 1, 2xcol, 2xcol+1, 4xcol, 4xcol+1, 6xcol, 6xcol+1]
552
+ */
553
+ vecOffsRef = vldrhq_u16((uint16_t const *) loadCmplxCol);
554
+ vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM))
555
+ + viwdupq_u16((uint32_t)0, (uint16_t) 2, 1);
556
+
557
+ pDataRow = pDataSrc;
558
+ pDataDestRow = pDataDest;
559
+ i = srcCols;
560
+ do
561
+ {
562
+ pDataC = (uint16_t const *) pDataRow;
563
+ pDataDestR = pDataDestRow;
564
+ vecOffsCur = vecOffsRef;
565
+
566
+ blkCnt = (srcRows * CMPLX_DIM) >> 3;
567
+ while (blkCnt > 0U)
568
+ {
569
+ vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
570
+ vstrhq(pDataDestR, vecIn);
571
+ pDataDestR+= 8; // VEC_LANES_U16
572
+ vecOffsCur = vaddq(vecOffsCur, (srcCols << 3));
573
+ /*
574
+ * Decrement the blockSize loop counter
575
+ */
576
+ blkCnt--;
577
+ }
578
+ /*
579
+ * tail
580
+ * (will be merged thru tail predication)
581
+ */
582
+ blkCnt = (srcRows * CMPLX_DIM) & 0x7;
583
+ if (blkCnt > 0U)
584
+ {
585
+ mve_pred16_t p0 = vctp16q(blkCnt);
586
+ vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
587
+ vstrhq_p(pDataDestR, vecIn, p0);
588
+ }
589
+
590
+ pDataRow += CMPLX_DIM;
591
+ pDataDestRow += (srcRows * CMPLX_DIM);
592
+ }
593
+ while (--i);
594
+
595
+ return (ARM_MATH_SUCCESS);
596
+ }
597
+ #endif /* MVEF and MVEI */
598
+
599
+ /***************************************
600
+
601
+ Definitions available for MVEI only
602
+
603
+ ***************************************/
604
+ #if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)
605
+
606
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h"
607
+
608
+ #define MVE_ASRL_SAT16(acc, shift) ((sqrshrl_sat48(acc, -(32-shift)) >> 32) & 0xffffffff)
609
+ #define MVE_ASRL_SAT32(acc, shift) ((sqrshrl(acc, -(32-shift)) >> 32) & 0xffffffff)
610
+
611
+
612
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
613
/* Fast vectorized square root of four Q31 values.
 * Normalizes each lane to an even number of sign bits, seeds an estimate
 * from sqrtTable_Q31, applies one polynomial refinement step, multiplies
 * back by the normalized input and rescales per lane.
 * Lanes holding negative inputs are forced to 0. */
__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn)
{
    q63x2_t         vecTmpLL;
    q31x4_t         vecTmp0, vecTmp1;
    q31_t           scale;
    q63_t           tmp64;
    q31x4_t         vecNrm, vecDst, vecIdx, vecSignBits;


    /* Count leading sign bits, rounded down to an even count so the
       normalization shift can be halved exactly on rescale. */
    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);
    /*
     * index = in >> 24;
     */
    vecIdx = vecNrm >> 24;
    vecIdx = vecIdx << 1;   /* table entries come in pairs */

    vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    /* Refinement of the table estimate (fixed-point rounding multiplies). */
    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);
    vecTmpLL = vmullbq_int(vecNrm, vecTmp0);    /* bottom (even) lanes, widened */

    /*
     * scale elements 0, 2
     */
    scale = 26 + (vecSignBits[0] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[0] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[2] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[2] = (q31_t) tmp64;

    vecTmpLL = vmulltq_int(vecNrm, vecTmp0);    /* top (odd) lanes, widened */

    /*
     * scale elements 1, 3
     */
    scale = 26 + (vecSignBits[1] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[1] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[3] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[3] = (q31_t) tmp64;
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0));

    return vecDst;
}
678
+ #endif
679
+
680
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
681
/* Fast vectorized square root of eight Q15 values.
 * Same scheme as FAST_VSQRT_Q31: normalize to an even sign-bit count, seed
 * from sqrtTable_Q15, refine once, multiply back and rescale — but the
 * per-lane rescale is done with widened even/odd lane shifts instead of
 * scalar extraction. Lanes holding negative inputs are forced to 0. */
__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn)
{
    q31x4_t         vecTmpLev, vecTmpLodd, vecSignL;
    q15x8_t         vecTmp0, vecTmp1;
    q15x8_t         vecNrm, vecDst, vecIdx, vecSignBits;

    vecDst = vuninitializedq_s16();

    /* Count leading sign bits, rounded down to an even count. */
    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);

    vecIdx = vecNrm >> 8;
    vecIdx = vecIdx << 1;   /* table entries come in pairs */

    vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    /* Refinement of the table estimate (fixed-point rounding multiplies). */
    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);

    vecSignBits = vecSignBits >> 1;

    /* Widening multiplies: even lanes and odd lanes separately. */
    vecTmpLev = vmullbq_int(vecNrm, vecTmp0);
    vecTmpLodd = vmulltq_int(vecNrm, vecTmp0);

    vecTmp0 = vecSignBits + 10;
    /*
     * negate sign to apply register based vshl
     */
    vecTmp0 = -vecTmp0;

    /*
     * shift even elements
     */
    vecSignL = vmovlbq(vecTmp0);
    vecTmpLev = vshlq(vecTmpLev, vecSignL);
    /*
     * shift odd elements
     */
    vecSignL = vmovltq(vecTmp0);
    vecTmpLodd = vshlq(vecTmpLodd, vecSignL);
    /*
     * merge and narrow odd and even parts
     */
    vecDst = vmovnbq_s32(vecDst, vecTmpLev);
    vecDst = vmovntq_s32(vecDst, vecTmpLodd);
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0));

    return vecDst;
}
745
+ #endif
746
+
747
+ #endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */
748
+
749
+ #ifdef __cplusplus
750
+ }
751
+ #endif
752
+
753
+ #endif
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_math.h
3
+ * @brief Public header file for CMSIS DSP Library
4
+ * @version V1.7.0
5
+ * @date 18. March 2019
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ /**
26
+ \mainpage CMSIS DSP Software Library
27
+ *
28
+ * \section intro Introduction
29
+ *
30
+ * This user manual describes the CMSIS DSP software library,
31
+ * a suite of common signal processing functions for use on Cortex-M and Cortex-A processor
32
+ * based devices.
33
+ *
34
+ * The library is divided into a number of functions each covering a specific category:
35
+ * - Basic math functions
36
+ * - Fast math functions
37
+ * - Complex math functions
38
+ * - Filtering functions
39
+ * - Matrix functions
40
+ * - Transform functions
41
+ * - Motor control functions
42
+ * - Statistical functions
43
+ * - Support functions
44
+ * - Interpolation functions
45
+ * - Support Vector Machine functions (SVM)
46
+ * - Bayes classifier functions
47
+ * - Distance functions
48
+ * - Quaternion functions
49
+ *
50
+ * The library has generally separate functions for operating on 8-bit integers, 16-bit integers,
51
+ * 32-bit integer and 32-bit floating-point values.
52
+ *
53
+ * The library provides vectorized versions of most algorithms for Helium
54
+ * and of most f32 algorithms for Neon.
55
+ *
56
+ * When using a vectorized version, provide a little bit of padding after the end of
57
+ * a buffer (3 words) because the vectorized code may read a little bit after the end
58
+ * of a buffer. You don't have to modify your buffers but just ensure that the
59
+ * end of buffer + padding is not outside of a memory region.
60
+ *
61
+ * \section using Using the Library
62
+ *
63
+ * The library installer contains prebuilt versions of the libraries in the <code>Lib</code> folder.
64
+ *
65
+ * Here is the list of pre-built libraries :
66
+ * - arm_cortexM7lfdp_math.lib (Cortex-M7, Little endian, Double Precision Floating Point Unit)
67
+ * - arm_cortexM7bfdp_math.lib (Cortex-M7, Big endian, Double Precision Floating Point Unit)
68
+ * - arm_cortexM7lfsp_math.lib (Cortex-M7, Little endian, Single Precision Floating Point Unit)
69
+ * - arm_cortexM7bfsp_math.lib (Cortex-M7, Big endian and Single Precision Floating Point Unit on)
70
+ * - arm_cortexM7l_math.lib (Cortex-M7, Little endian)
71
+ * - arm_cortexM7b_math.lib (Cortex-M7, Big endian)
72
+ * - arm_cortexM4lf_math.lib (Cortex-M4, Little endian, Floating Point Unit)
73
+ * - arm_cortexM4bf_math.lib (Cortex-M4, Big endian, Floating Point Unit)
74
+ * - arm_cortexM4l_math.lib (Cortex-M4, Little endian)
75
+ * - arm_cortexM4b_math.lib (Cortex-M4, Big endian)
76
+ * - arm_cortexM3l_math.lib (Cortex-M3, Little endian)
77
+ * - arm_cortexM3b_math.lib (Cortex-M3, Big endian)
78
+ * - arm_cortexM0l_math.lib (Cortex-M0 / Cortex-M0+, Little endian)
79
+ * - arm_cortexM0b_math.lib (Cortex-M0 / Cortex-M0+, Big endian)
80
+ * - arm_ARMv8MBLl_math.lib (Armv8-M Baseline, Little endian)
81
+ * - arm_ARMv8MMLl_math.lib (Armv8-M Mainline, Little endian)
82
+ * - arm_ARMv8MMLlfsp_math.lib (Armv8-M Mainline, Little endian, Single Precision Floating Point Unit)
83
+ * - arm_ARMv8MMLld_math.lib (Armv8-M Mainline, Little endian, DSP instructions)
84
+ * - arm_ARMv8MMLldfsp_math.lib (Armv8-M Mainline, Little endian, DSP instructions, Single Precision Floating Point Unit)
85
+ *
86
+ * The library functions are declared in the public file <code>arm_math.h</code> which is placed in the <code>Include</code> folder.
87
+ * Simply include this file and link the appropriate library in the application and begin calling the library functions. The Library supports single
88
+ * public header file <code> arm_math.h</code> for Cortex-M cores with little endian and big endian. Same header file will be used for floating point unit(FPU) variants.
89
+ *
90
+ *
91
+ * \section example Examples
92
+ *
93
+ * The library ships with a number of examples which demonstrate how to use the library functions.
94
+ *
95
+ * \section toolchain Toolchain Support
96
+ *
97
+ * The library is now tested on Fast Models building with cmake.
98
+ * Core M0, M7, A5 are tested.
99
+ *
100
+ *
101
+ *
102
+ * \section building Building the Library
103
+ *
104
+ * The library installer contains a project file to rebuild libraries on MDK toolchain in the <code>CMSIS\\DSP\\Projects\\ARM</code> folder.
105
+ * - arm_cortexM_math.uvprojx
106
+ *
107
+ *
108
+ * The libraries can be built by opening the arm_cortexM_math.uvprojx project in MDK-ARM, selecting a specific target, and defining the optional preprocessor macros detailed above.
109
+ *
110
+ * There is also a work in progress cmake build. The README file is giving more details.
111
+ *
112
+ * \section preprocessor Preprocessor Macros
113
+ *
114
+ * Each library project have different preprocessor macros.
115
+ *
116
+ * - ARM_MATH_BIG_ENDIAN:
117
+ *
118
+ * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default library builds for little endian targets.
119
+ *
120
+ * - ARM_MATH_MATRIX_CHECK:
121
+ *
122
+ * Define macro ARM_MATH_MATRIX_CHECK for checking on the input and output sizes of matrices
123
+ *
124
+ * - ARM_MATH_ROUNDING:
125
+ *
126
+ * Define macro ARM_MATH_ROUNDING for rounding on support functions
127
+ *
128
+ * - ARM_MATH_LOOPUNROLL:
129
+ *
130
+ * Define macro ARM_MATH_LOOPUNROLL to enable manual loop unrolling in DSP functions
131
+ *
132
+ * - ARM_MATH_NEON:
133
+ *
134
+ * Define macro ARM_MATH_NEON to enable Neon versions of the DSP functions.
135
+ * It is not enabled by default when Neon is available because performances are
136
+ * dependent on the compiler and target architecture.
137
+ *
138
+ * - ARM_MATH_NEON_EXPERIMENTAL:
139
+ *
140
+ * Define macro ARM_MATH_NEON_EXPERIMENTAL to enable experimental Neon versions of
141
+ * of some DSP functions. Experimental Neon versions currently do not have better
142
+ * performances than the scalar versions.
143
+ *
144
+ * - ARM_MATH_HELIUM:
145
+ *
146
+ * It implies the flags ARM_MATH_MVEF and ARM_MATH_MVEI and ARM_MATH_FLOAT16.
147
+ *
148
+ * - ARM_MATH_MVEF:
149
+ *
150
+ * Select Helium versions of the f32 algorithms.
151
+ * It implies ARM_MATH_FLOAT16 and ARM_MATH_MVEI.
152
+ *
153
+ * - ARM_MATH_MVEI:
154
+ *
155
+ * Select Helium versions of the int and fixed point algorithms.
156
+ *
157
+ * - ARM_MATH_MVE_FLOAT16:
158
+ *
159
+ * MVE Float16 implementations of some algorithms (Requires MVE extension).
160
+ *
161
+ * - DISABLEFLOAT16:
162
+ *
163
+ * Disable float16 algorithms when __fp16 is not supported for a
164
+ * specific compiler / core configuration.
165
+ * This is only valid for scalar. When vector architecture is
166
+ * supporting f16 then it can't be disabled.
167
+ *
168
+ * <hr>
169
+ * \section pack CMSIS-DSP in ARM::CMSIS Pack
170
+ *
171
+ * The following files relevant to CMSIS-DSP are present in the <b>ARM::CMSIS</b> Pack directories:
172
+ * |File/Folder |Content |
173
+ * |---------------------------------|------------------------------------------------------------------------|
174
+ * |\b CMSIS\\Documentation\\DSP | This documentation |
175
+ * |\b CMSIS\\DSP\\DSP_Lib_TestSuite | DSP_Lib deprecated test suite |
176
+ * |\b CMSIS\\DSP\\Examples | Example projects demonstrating the usage of the library functions |
177
+ * |\b CMSIS\\DSP\\Include | DSP_Lib include files for using and building the lib
178
+ * |\b CMSIS\\DSP\\PrivateInclude | DSP_Lib private include files for building the lib |
179
+ * |\b CMSIS\\DSP\\Lib | DSP_Lib binaries |
180
+ * |\b CMSIS\\DSP\\Projects | Projects to rebuild DSP_Lib binaries |
181
+ * |\b CMSIS\\DSP\\Source | DSP_Lib source files |
182
+ *
183
+ * <hr>
184
+ * \section rev Revision History of CMSIS-DSP
185
+ * Please refer to \ref ChangeLog_pg.
186
+ */
187
+
188
+
189
+
190
+
191
+
192
+
193
+
194
+
195
+
196
+
197
+
198
+ /**
199
+ * @defgroup groupExamples Examples
200
+ */
201
+
202
+
203
+
204
+
205
+
206
+ #ifndef _ARM_MATH_H
207
+ #define _ARM_MATH_H
208
+
209
+
210
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
211
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h"
212
+
213
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h"
214
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h"
215
+
216
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h"
217
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions.h"
218
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions.h"
219
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"
220
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h"
221
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h"
222
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions.h"
223
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h"
224
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h"
225
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions.h"
226
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h"
227
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h"
228
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h"
229
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/quaternion_math_functions.h"
230
+
231
+
232
+
233
+ #ifdef __cplusplus
234
+ extern "C"
235
+ {
236
+ #endif
237
+
238
+
239
+
240
+
241
+ //#define TABLE_SPACING_Q31 0x400000
242
+ //#define TABLE_SPACING_Q15 0x80
243
+
244
+
245
+
246
+
247
+
248
+ #ifdef __cplusplus
249
+ }
250
+ #endif
251
+
252
+
253
+ #endif /* _ARM_MATH_H */
254
+
255
+ /**
256
+ *
257
+ * End of file.
258
+ */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_f16.h ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_math_f16.h
3
+ * @brief Public header file for f16 function of the CMSIS DSP Library
4
+ * @version V1.8.1
5
+ * @date 20. April 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_MATH_F16_H
26
+ #define _ARM_MATH_F16_H
27
+
28
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h"
29
+
30
+ #ifdef __cplusplus
31
+ extern "C"
32
+ {
33
+ #endif
34
+
35
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h"
36
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h"
37
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h"
38
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h"
39
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions_f16.h"
40
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions_f16.h"
41
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h"
42
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions_f16.h"
43
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h"
44
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions_f16.h"
45
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions_f16.h"
46
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions_f16.h"
47
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions_f16.h"
48
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h"
49
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h"
50
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions_f16.h"
51
+
52
+ #ifdef __cplusplus
53
+ }
54
+ #endif
55
+
56
+ #endif /* _ARM_MATH_F16_H */
57
+
58
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_math_memory.h
3
+ * @brief Public header file for CMSIS DSP Library
4
+ * @version V1.9.0
5
+ * @date 20. July 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_MATH_MEMORY_H_
26
+
27
+ #define _ARM_MATH_MEMORY_H_
28
+
29
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
30
+
31
+
32
+ #ifdef __cplusplus
33
+ extern "C"
34
+ {
35
+ #endif
36
+
37
+ /**
38
+ @brief definition to read/write two 16 bit values.
39
+ @deprecated
40
+ */
41
+ #if defined ( __CC_ARM )
42
+ #define __SIMD32_TYPE int32_t __packed
43
+ #elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 )
44
+ #define __SIMD32_TYPE int32_t
45
+ #elif defined ( __GNUC__ )
46
+ #define __SIMD32_TYPE int32_t
47
+ #elif defined ( __ICCARM__ )
48
+ #define __SIMD32_TYPE int32_t __packed
49
+ #elif defined ( __TI_ARM__ )
50
+ #define __SIMD32_TYPE int32_t
51
+ #elif defined ( __CSMC__ )
52
+ #define __SIMD32_TYPE int32_t
53
+ #elif defined ( __TASKING__ )
54
+ #define __SIMD32_TYPE __un(aligned) int32_t
55
+ #elif defined(_MSC_VER )
56
+ #define __SIMD32_TYPE int32_t
57
+ #else
58
+ #error Unknown compiler
59
+ #endif
60
+
61
+ #define __SIMD32(addr) (*(__SIMD32_TYPE **) & (addr))
62
+ #define __SIMD32_CONST(addr) ( (__SIMD32_TYPE * ) (addr))
63
+ #define _SIMD32_OFFSET(addr) (*(__SIMD32_TYPE * ) (addr))
64
+ #define __SIMD64(addr) (*( int64_t **) & (addr))
65
+
66
+
67
+ /* SIMD replacement */
68
+
69
+
70
+ /**
71
+ @brief Read 2 Q15 from Q15 pointer.
72
+ @param[in] pQ15 points to input value
73
+ @return Q31 value
74
+ */
75
+ __STATIC_FORCEINLINE q31_t read_q15x2 (
76
+ q15_t * pQ15)
77
+ {
78
+ q31_t val;
79
+
80
+ #ifdef __ARM_FEATURE_UNALIGNED
81
+ memcpy (&val, pQ15, 4);
82
+ #else
83
+ val = (pQ15[1] << 16) | (pQ15[0] & 0x0FFFF) ;
84
+ #endif
85
+
86
+ return (val);
87
+ }
88
+
89
+ /**
90
+ @brief Read 2 Q15 from Q15 pointer and increment pointer afterwards.
91
+ @param[in] pQ15 points to input value
92
+ @return Q31 value
93
+ */
94
+ __STATIC_FORCEINLINE q31_t read_q15x2_ia (
95
+ q15_t ** pQ15)
96
+ {
97
+ q31_t val;
98
+
99
+ #ifdef __ARM_FEATURE_UNALIGNED
100
+ memcpy (&val, *pQ15, 4);
101
+ #else
102
+ val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF);
103
+ #endif
104
+
105
+ *pQ15 += 2;
106
+ return (val);
107
+ }
108
+
109
+ /**
110
+ @brief Read 2 Q15 from Q15 pointer and decrement pointer afterwards.
111
+ @param[in] pQ15 points to input value
112
+ @return Q31 value
113
+ */
114
+ __STATIC_FORCEINLINE q31_t read_q15x2_da (
115
+ q15_t ** pQ15)
116
+ {
117
+ q31_t val;
118
+
119
+ #ifdef __ARM_FEATURE_UNALIGNED
120
+ memcpy (&val, *pQ15, 4);
121
+ #else
122
+ val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF);
123
+ #endif
124
+
125
+ *pQ15 -= 2;
126
+ return (val);
127
+ }
128
+
129
+ /**
130
+ @brief Write 2 Q15 to Q15 pointer and increment pointer afterwards.
131
+ @param[in] pQ15 points to input value
132
+ @param[in] value Q31 value
133
+ @return none
134
+ */
135
+ __STATIC_FORCEINLINE void write_q15x2_ia (
136
+ q15_t ** pQ15,
137
+ q31_t value)
138
+ {
139
+ q31_t val = value;
140
+ #ifdef __ARM_FEATURE_UNALIGNED
141
+ memcpy (*pQ15, &val, 4);
142
+ #else
143
+ (*pQ15)[0] = (val & 0x0FFFF);
144
+ (*pQ15)[1] = (val >> 16) & 0x0FFFF;
145
+ #endif
146
+
147
+ *pQ15 += 2;
148
+ }
149
+
150
+ /**
151
+ @brief Write 2 Q15 to Q15 pointer.
152
+ @param[in] pQ15 points to input value
153
+ @param[in] value Q31 value
154
+ @return none
155
+ */
156
+ __STATIC_FORCEINLINE void write_q15x2 (
157
+ q15_t * pQ15,
158
+ q31_t value)
159
+ {
160
+ q31_t val = value;
161
+
162
+ #ifdef __ARM_FEATURE_UNALIGNED
163
+ memcpy (pQ15, &val, 4);
164
+ #else
165
+ pQ15[0] = val & 0x0FFFF;
166
+ pQ15[1] = val >> 16;
167
+ #endif
168
+ }
169
+
170
+
171
+ /**
172
+ @brief Read 4 Q7 from Q7 pointer and increment pointer afterwards.
173
+ @param[in] pQ7 points to input value
174
+ @return Q31 value
175
+ */
176
+ __STATIC_FORCEINLINE q31_t read_q7x4_ia (
177
+ q7_t ** pQ7)
178
+ {
179
+ q31_t val;
180
+
181
+
182
+ #ifdef __ARM_FEATURE_UNALIGNED
183
+ memcpy (&val, *pQ7, 4);
184
+ #else
185
+ val =(((*pQ7)[3] & 0x0FF) << 24) | (((*pQ7)[2] & 0x0FF) << 16) | (((*pQ7)[1] & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF);
186
+ #endif
187
+
188
+ *pQ7 += 4;
189
+
190
+ return (val);
191
+ }
192
+
193
+ /**
194
+ @brief Read 4 Q7 from Q7 pointer and decrement pointer afterwards.
195
+ @param[in] pQ7 points to input value
196
+ @return Q31 value
197
+ */
198
+ __STATIC_FORCEINLINE q31_t read_q7x4_da (
199
+ q7_t ** pQ7)
200
+ {
201
+ q31_t val;
202
+ #ifdef __ARM_FEATURE_UNALIGNED
203
+ memcpy (&val, *pQ7, 4);
204
+ #else
205
+ val = ((((*pQ7)[3]) & 0x0FF) << 24) | ((((*pQ7)[2]) & 0x0FF) << 16) | ((((*pQ7)[1]) & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF);
206
+ #endif
207
+ *pQ7 -= 4;
208
+
209
+ return (val);
210
+ }
211
+
212
+ /**
213
+ @brief Write 4 Q7 to Q7 pointer and increment pointer afterwards.
214
+ @param[in] pQ7 points to input value
215
+ @param[in] value Q31 value
216
+ @return none
217
+ */
218
+ __STATIC_FORCEINLINE void write_q7x4_ia (
219
+ q7_t ** pQ7,
220
+ q31_t value)
221
+ {
222
+ q31_t val = value;
223
+ #ifdef __ARM_FEATURE_UNALIGNED
224
+ memcpy (*pQ7, &val, 4);
225
+ #else
226
+ (*pQ7)[0] = val & 0x0FF;
227
+ (*pQ7)[1] = (val >> 8) & 0x0FF;
228
+ (*pQ7)[2] = (val >> 16) & 0x0FF;
229
+ (*pQ7)[3] = (val >> 24) & 0x0FF;
230
+
231
+ #endif
232
+ *pQ7 += 4;
233
+ }
234
+
235
+
236
+ #ifdef __cplusplus
237
+ }
238
+ #endif
239
+
240
+ #endif /*ifndef _ARM_MATH_MEMORY_H_ */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h ADDED
@@ -0,0 +1,591 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_math_types.h
3
+ * @brief Public header file for CMSIS DSP Library
4
+ * @version V1.9.0
5
+ * @date 20. July 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_MATH_TYPES_H_
26
+
27
+ #define _ARM_MATH_TYPES_H_
28
+
29
+ #ifdef __cplusplus
30
+ extern "C"
31
+ {
32
+ #endif
33
+
34
+ /* Compiler specific diagnostic adjustment */
35
+ #if defined ( __CC_ARM )
36
+
37
+ #elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 )
38
+
39
+ #elif defined ( __GNUC__ )
40
+ #pragma GCC diagnostic push
41
+ #pragma GCC diagnostic ignored "-Wsign-conversion"
42
+ #pragma GCC diagnostic ignored "-Wconversion"
43
+ #pragma GCC diagnostic ignored "-Wunused-parameter"
44
+
45
+ #elif defined ( __ICCARM__ )
46
+
47
+ #elif defined ( __TI_ARM__ )
48
+
49
+ #elif defined ( __CSMC__ )
50
+
51
+ #elif defined ( __TASKING__ )
52
+
53
+ #elif defined ( _MSC_VER )
54
+
55
+ #else
56
+ #error Unknown compiler
57
+ #endif
58
+
59
+
60
+ /* Included for instrinsics definitions */
61
+ #if defined (_MSC_VER )
62
+ #include <stdint.h>
63
+ #define __STATIC_FORCEINLINE static __forceinline
64
+ #define __STATIC_INLINE static __inline
65
+ #define __ALIGNED(x) __declspec(align(x))
66
+
67
+ #elif defined (__GNUC_PYTHON__)
68
+ #include <stdint.h>
69
+ #define __ALIGNED(x) __attribute__((aligned(x)))
70
+ #define __STATIC_FORCEINLINE static inline __attribute__((always_inline))
71
+ #define __STATIC_INLINE static inline
72
+
73
+ #else
74
+ #include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h"
75
+ #endif
76
+
77
+
78
+
79
+ #include <string.h>
80
+ #include <math.h>
81
+ #include <float.h>
82
+ #include <limits.h>
83
+
84
+ /* evaluate ARM DSP feature */
85
+ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
86
+ #define ARM_MATH_DSP 1
87
+ #endif
88
+
89
+ #if defined(ARM_MATH_NEON)
90
+ #include <arm_neon.h>
91
+ #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
92
+ #if !defined(ARM_MATH_NEON_FLOAT16)
93
+ #define ARM_MATH_NEON_FLOAT16
94
+ #endif
95
+ #endif
96
+ #endif
97
+
98
+ #if !defined(ARM_MATH_AUTOVECTORIZE)
99
+
100
+ #if __ARM_FEATURE_MVE
101
+ #if !defined(ARM_MATH_MVEI)
102
+ #define ARM_MATH_MVEI
103
+ #endif
104
+ #endif
105
+
106
+ #if (__ARM_FEATURE_MVE & 2)
107
+ #if !defined(ARM_MATH_MVEF)
108
+ #define ARM_MATH_MVEF
109
+ #endif
110
+ #if !defined(ARM_MATH_MVE_FLOAT16)
111
+ #define ARM_MATH_MVE_FLOAT16
112
+ #endif
113
+ #endif
114
+
115
+ #endif /*!defined(ARM_MATH_AUTOVECTORIZE)*/
116
+
117
+
118
+ #if defined (ARM_MATH_HELIUM)
119
+ #if !defined(ARM_MATH_MVEF)
120
+ #define ARM_MATH_MVEF
121
+ #endif
122
+
123
+ #if !defined(ARM_MATH_MVEI)
124
+ #define ARM_MATH_MVEI
125
+ #endif
126
+
127
+ #if !defined(ARM_MATH_MVE_FLOAT16)
128
+ #define ARM_MATH_MVE_FLOAT16
129
+ #endif
130
+ #endif
131
+
132
+
133
+
134
+ #if defined ( __CC_ARM )
135
+ /* Enter low optimization region - place directly above function definition */
136
+ #if defined( __ARM_ARCH_7EM__ )
137
+ #define LOW_OPTIMIZATION_ENTER \
138
+ _Pragma ("push") \
139
+ _Pragma ("O1")
140
+ #else
141
+ #define LOW_OPTIMIZATION_ENTER
142
+ #endif
143
+
144
+ /* Exit low optimization region - place directly after end of function definition */
145
+ #if defined ( __ARM_ARCH_7EM__ )
146
+ #define LOW_OPTIMIZATION_EXIT \
147
+ _Pragma ("pop")
148
+ #else
149
+ #define LOW_OPTIMIZATION_EXIT
150
+ #endif
151
+
152
+ /* Enter low optimization region - place directly above function definition */
153
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
154
+
155
+ /* Exit low optimization region - place directly after end of function definition */
156
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
157
+
158
+ #elif defined (__ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 )
159
+ #define LOW_OPTIMIZATION_ENTER
160
+ #define LOW_OPTIMIZATION_EXIT
161
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
162
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
163
+
164
+ #elif defined ( __GNUC__ )
165
+ #define LOW_OPTIMIZATION_ENTER \
166
+ __attribute__(( optimize("-O1") ))
167
+ #define LOW_OPTIMIZATION_EXIT
168
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
169
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
170
+
171
+ #elif defined ( __ICCARM__ )
172
+ /* Enter low optimization region - place directly above function definition */
173
+ #if defined ( __ARM_ARCH_7EM__ )
174
+ #define LOW_OPTIMIZATION_ENTER \
175
+ _Pragma ("optimize=low")
176
+ #else
177
+ #define LOW_OPTIMIZATION_ENTER
178
+ #endif
179
+
180
+ /* Exit low optimization region - place directly after end of function definition */
181
+ #define LOW_OPTIMIZATION_EXIT
182
+
183
+ /* Enter low optimization region - place directly above function definition */
184
+ #if defined ( __ARM_ARCH_7EM__ )
185
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER \
186
+ _Pragma ("optimize=low")
187
+ #else
188
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
189
+ #endif
190
+
191
+ /* Exit low optimization region - place directly after end of function definition */
192
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
193
+
194
+ #elif defined ( __TI_ARM__ )
195
+ #define LOW_OPTIMIZATION_ENTER
196
+ #define LOW_OPTIMIZATION_EXIT
197
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
198
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
199
+
200
+ #elif defined ( __CSMC__ )
201
+ #define LOW_OPTIMIZATION_ENTER
202
+ #define LOW_OPTIMIZATION_EXIT
203
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
204
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
205
+
206
+ #elif defined ( __TASKING__ )
207
+ #define LOW_OPTIMIZATION_ENTER
208
+ #define LOW_OPTIMIZATION_EXIT
209
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
210
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
211
+
212
+ #elif defined ( _MSC_VER ) || defined(__GNUC_PYTHON__)
213
+ #define LOW_OPTIMIZATION_ENTER
214
+ #define LOW_OPTIMIZATION_EXIT
215
+ #define IAR_ONLY_LOW_OPTIMIZATION_ENTER
216
+ #define IAR_ONLY_LOW_OPTIMIZATION_EXIT
217
+ #endif
218
+
219
+
220
+
221
+ /* Compiler specific diagnostic adjustment */
222
+ #if defined ( __CC_ARM )
223
+
224
+ #elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 )
225
+
226
+ #elif defined ( __GNUC__ )
227
+ #pragma GCC diagnostic pop
228
+
229
+ #elif defined ( __ICCARM__ )
230
+
231
+ #elif defined ( __TI_ARM__ )
232
+
233
+ #elif defined ( __CSMC__ )
234
+
235
+ #elif defined ( __TASKING__ )
236
+
237
+ #elif defined ( _MSC_VER )
238
+
239
+ #else
240
+ #error Unknown compiler
241
+ #endif
242
+
243
+ #ifdef __cplusplus
244
+ }
245
+ #endif
246
+
247
+ #if __ARM_FEATURE_MVE
248
+ #include <arm_mve.h>
249
+ #endif
250
+
251
+ #ifdef __cplusplus
252
+ extern "C"
253
+ {
254
+ #endif
255
+
256
+ /**
257
+ * @brief 8-bit fractional data type in 1.7 format.
258
+ */
259
+ typedef int8_t q7_t;
260
+
261
+ /**
262
+ * @brief 16-bit fractional data type in 1.15 format.
263
+ */
264
+ typedef int16_t q15_t;
265
+
266
+ /**
267
+ * @brief 32-bit fractional data type in 1.31 format.
268
+ */
269
+ typedef int32_t q31_t;
270
+
271
+ /**
272
+ * @brief 64-bit fractional data type in 1.63 format.
273
+ */
274
+ typedef int64_t q63_t;
275
+
276
+ /**
277
+ * @brief 32-bit floating-point type definition.
278
+ */
279
+ typedef float float32_t;
280
+
281
+ /**
282
+ * @brief 64-bit floating-point type definition.
283
+ */
284
+ typedef double float64_t;
285
+
286
+ /**
287
+ * @brief vector types
288
+ */
289
+ #if defined(ARM_MATH_NEON) || (defined (ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE))
290
+ /**
291
+ * @brief 64-bit fractional 128-bit vector data type in 1.63 format
292
+ */
293
+ typedef int64x2_t q63x2_t;
294
+
295
+ /**
296
+ * @brief 32-bit fractional 128-bit vector data type in 1.31 format.
297
+ */
298
+ typedef int32x4_t q31x4_t;
299
+
300
+ /**
301
+ * @brief 16-bit fractional 128-bit vector data type with 16-bit alignement in 1.15 format.
302
+ */
303
+ typedef __ALIGNED(2) int16x8_t q15x8_t;
304
+
305
+ /**
306
+ * @brief 8-bit fractional 128-bit vector data type with 8-bit alignement in 1.7 format.
307
+ */
308
+ typedef __ALIGNED(1) int8x16_t q7x16_t;
309
+
310
+ /**
311
+ * @brief 32-bit fractional 128-bit vector pair data type in 1.31 format.
312
+ */
313
+ typedef int32x4x2_t q31x4x2_t;
314
+
315
+ /**
316
+ * @brief 32-bit fractional 128-bit vector quadruplet data type in 1.31 format.
317
+ */
318
+ typedef int32x4x4_t q31x4x4_t;
319
+
320
+ /**
321
+ * @brief 16-bit fractional 128-bit vector pair data type in 1.15 format.
322
+ */
323
+ typedef int16x8x2_t q15x8x2_t;
324
+
325
+ /**
326
+ * @brief 16-bit fractional 128-bit vector quadruplet data type in 1.15 format.
327
+ */
328
+ typedef int16x8x4_t q15x8x4_t;
329
+
330
+ /**
331
+ * @brief 8-bit fractional 128-bit vector pair data type in 1.7 format.
332
+ */
333
+ typedef int8x16x2_t q7x16x2_t;
334
+
335
+ /**
336
+ * @brief 8-bit fractional 128-bit vector quadruplet data type in 1.7 format.
337
+ */
338
+ typedef int8x16x4_t q7x16x4_t;
339
+
340
+ /**
341
+ * @brief 32-bit fractional data type in 9.23 format.
342
+ */
343
+ typedef int32_t q23_t;
344
+
345
+ /**
346
+ * @brief 32-bit fractional 128-bit vector data type in 9.23 format.
347
+ */
348
+ typedef int32x4_t q23x4_t;
349
+
350
+ /**
351
+ * @brief 64-bit status 128-bit vector data type.
352
+ */
353
+ typedef int64x2_t status64x2_t;
354
+
355
+ /**
356
+ * @brief 32-bit status 128-bit vector data type.
357
+ */
358
+ typedef int32x4_t status32x4_t;
359
+
360
+ /**
361
+ * @brief 16-bit status 128-bit vector data type.
362
+ */
363
+ typedef int16x8_t status16x8_t;
364
+
365
+ /**
366
+ * @brief 8-bit status 128-bit vector data type.
367
+ */
368
+ typedef int8x16_t status8x16_t;
369
+
370
+
371
+ #endif
372
+
373
+ #if defined(ARM_MATH_NEON) || (defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)) /* floating point vector*/
374
+ /**
375
+ * @brief 32-bit floating-point 128-bit vector type
376
+ */
377
+ typedef float32x4_t f32x4_t;
378
+
379
+ /**
380
+ * @brief 32-bit floating-point 128-bit vector pair data type
381
+ */
382
+ typedef float32x4x2_t f32x4x2_t;
383
+
384
+ /**
385
+ * @brief 32-bit floating-point 128-bit vector quadruplet data type
386
+ */
387
+ typedef float32x4x4_t f32x4x4_t;
388
+
389
+ /**
390
+ * @brief 32-bit ubiquitous 128-bit vector data type
391
+ */
392
+ typedef union _any32x4_t
393
+ {
394
+ float32x4_t f;
395
+ int32x4_t i;
396
+ } any32x4_t;
397
+
398
+ #endif
399
+
400
+ #if defined(ARM_MATH_NEON)
401
+ /**
402
+ * @brief 32-bit fractional 64-bit vector data type in 1.31 format.
403
+ */
404
+ typedef int32x2_t q31x2_t;
405
+
406
+ /**
407
+ * @brief 16-bit fractional 64-bit vector data type in 1.15 format.
408
+ */
409
+ typedef __ALIGNED(2) int16x4_t q15x4_t;
410
+
411
+ /**
412
+ * @brief 8-bit fractional 64-bit vector data type in 1.7 format.
413
+ */
414
+ typedef __ALIGNED(1) int8x8_t q7x8_t;
415
+
416
+ /**
417
+ * @brief 32-bit float 64-bit vector data type.
418
+ */
419
+ typedef float32x2_t f32x2_t;
420
+
421
+ /**
422
+ * @brief 32-bit floating-point 128-bit vector triplet data type
423
+ */
424
+ typedef float32x4x3_t f32x4x3_t;
425
+
426
+
427
+ /**
428
+ * @brief 32-bit fractional 128-bit vector triplet data type in 1.31 format
429
+ */
430
+ typedef int32x4x3_t q31x4x3_t;
431
+
432
+ /**
433
+ * @brief 16-bit fractional 128-bit vector triplet data type in 1.15 format
434
+ */
435
+ typedef int16x8x3_t q15x8x3_t;
436
+
437
+ /**
438
+ * @brief 8-bit fractional 128-bit vector triplet data type in 1.7 format
439
+ */
440
+ typedef int8x16x3_t q7x16x3_t;
441
+
442
+ /**
443
+ * @brief 32-bit floating-point 64-bit vector pair data type
444
+ */
445
+ typedef float32x2x2_t f32x2x2_t;
446
+
447
+ /**
448
+ * @brief 32-bit floating-point 64-bit vector triplet data type
449
+ */
450
+ typedef float32x2x3_t f32x2x3_t;
451
+
452
+ /**
453
+ * @brief 32-bit floating-point 64-bit vector quadruplet data type
454
+ */
455
+ typedef float32x2x4_t f32x2x4_t;
456
+
457
+
458
+ /**
459
+ * @brief 32-bit fractional 64-bit vector pair data type in 1.31 format
460
+ */
461
+ typedef int32x2x2_t q31x2x2_t;
462
+
463
+ /**
464
+ * @brief 32-bit fractional 64-bit vector triplet data type in 1.31 format
465
+ */
466
+ typedef int32x2x3_t q31x2x3_t;
467
+
468
/**
 * @brief 32-bit fractional 64-bit vector quadruplet data type in 1.31 format
 *        (four int32x2_t lanes; was mistakenly typedef'd to the 128-bit
 *        triplet type int32x4x3_t, contradicting this comment).
 */
typedef int32x2x4_t q31x2x4_t;
472
+
473
+ /**
474
+ * @brief 16-bit fractional 64-bit vector pair data type in 1.15 format
475
+ */
476
+ typedef int16x4x2_t q15x4x2_t;
477
+
478
/**
 * @brief 16-bit fractional 64-bit vector triplet data type in 1.15 format
 *        (three int16x4_t lanes; was mistakenly typedef'd to the pair type
 *        int16x4x2_t, contradicting this comment).
 */
typedef int16x4x3_t q15x4x3_t;
482
+
483
/**
 * @brief 16-bit fractional 64-bit vector quadruplet data type in 1.15 format
 *        (four int16x4_t lanes; was mistakenly typedef'd to the triplet type
 *        int16x4x3_t, contradicting this comment).
 */
typedef int16x4x4_t q15x4x4_t;
487
+
488
+ /**
489
+ * @brief 8-bit fractional 64-bit vector pair data type in 1.7 format
490
+ */
491
+ typedef int8x8x2_t q7x8x2_t;
492
+
493
+ /**
494
+ * @brief 8-bit fractional 64-bit vector triplet data type in 1.7 format
495
+ */
496
+ typedef int8x8x3_t q7x8x3_t;
497
+
498
+ /**
499
+ * @brief 8-bit fractional 64-bit vector quadruplet data type in 1.7 format
500
+ */
501
+ typedef int8x8x4_t q7x8x4_t;
502
+
503
+ /**
504
+ * @brief 32-bit ubiquitous 64-bit vector data type
505
+ */
506
+ typedef union _any32x2_t
507
+ {
508
+ float32x2_t f;
509
+ int32x2_t i;
510
+ } any32x2_t;
511
+
512
+
513
/**
 * @brief 32-bit status 64-bit vector data type.
 *        NOTE(review): was typedef'd to the 128-bit int32x4_t although the
 *        comment (and the x2 name) describe a 64-bit vector — fixed to int32x2_t.
 */
typedef int32x2_t status32x2_t;
517
+
518
/**
 * @brief 16-bit status 64-bit vector data type.
 *        NOTE(review): was typedef'd to the 128-bit int16x8_t although the
 *        comment (and the x4 name) describe a 64-bit vector — fixed to int16x4_t.
 */
typedef int16x4_t status16x4_t;
522
+
523
/**
 * @brief 8-bit status 64-bit vector data type.
 *        NOTE(review): was typedef'd to the 128-bit int8x16_t although the
 *        comment (and the x8 name) describe a 64-bit vector — fixed to int8x8_t.
 */
typedef int8x8_t status8x8_t;
527
+
528
+ #endif
529
+
530
+
531
+
532
+
533
+
534
+ #define F64_MAX ((float64_t)DBL_MAX)
535
+ #define F32_MAX ((float32_t)FLT_MAX)
536
+
537
+
538
+
539
+ #define F64_MIN (-DBL_MAX)
540
+ #define F32_MIN (-FLT_MAX)
541
+
542
+
543
+
544
+ #define F64_ABSMAX ((float64_t)DBL_MAX)
545
+ #define F32_ABSMAX ((float32_t)FLT_MAX)
546
+
547
+
548
+
549
+ #define F64_ABSMIN ((float64_t)0.0)
550
+ #define F32_ABSMIN ((float32_t)0.0)
551
+
552
+
553
+ #define Q31_MAX ((q31_t)(0x7FFFFFFFL))
554
+ #define Q15_MAX ((q15_t)(0x7FFF))
555
+ #define Q7_MAX ((q7_t)(0x7F))
556
+ #define Q31_MIN ((q31_t)(0x80000000L))
557
+ #define Q15_MIN ((q15_t)(0x8000))
558
+ #define Q7_MIN ((q7_t)(0x80))
559
+
560
+ #define Q31_ABSMAX ((q31_t)(0x7FFFFFFFL))
561
+ #define Q15_ABSMAX ((q15_t)(0x7FFF))
562
+ #define Q7_ABSMAX ((q7_t)(0x7F))
563
+ #define Q31_ABSMIN ((q31_t)0)
564
+ #define Q15_ABSMIN ((q15_t)0)
565
+ #define Q7_ABSMIN ((q7_t)0)
566
+
567
+ /* Dimension C vector space */
568
+ #define CMPLX_DIM 2
569
+
570
+ /**
571
+ * @brief Error status returned by some functions in the library.
572
+ */
573
+
574
+ typedef enum
575
+ {
576
+ ARM_MATH_SUCCESS = 0, /**< No error */
577
+ ARM_MATH_ARGUMENT_ERROR = -1, /**< One or more arguments are incorrect */
578
+ ARM_MATH_LENGTH_ERROR = -2, /**< Length of data buffer is incorrect */
579
+ ARM_MATH_SIZE_MISMATCH = -3, /**< Size of matrices is not compatible with the operation */
580
+ ARM_MATH_NANINF = -4, /**< Not-a-number (NaN) or infinity is generated */
581
+ ARM_MATH_SINGULAR = -5, /**< Input matrix is singular and cannot be inverted */
582
+ ARM_MATH_TEST_FAILURE = -6, /**< Test Failed */
583
+ ARM_MATH_DECOMPOSITION_FAILURE = -7 /**< Decomposition Failed */
584
+ } arm_status;
585
+
586
+
587
+ #ifdef __cplusplus
588
+ }
589
+ #endif
590
+
591
+ #endif /*ifndef _ARM_MATH_TYPES_H_ */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_math_types_f16.h
3
+ * @brief Public header file for f16 function of the CMSIS DSP Library
4
+ * @version V1.9.0
5
+ * @date 20. July 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_MATH_TYPES_F16_H
26
+ #define _ARM_MATH_TYPES_F16_H
27
+
28
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
29
+
30
+ #ifdef __cplusplus
31
+ extern "C"
32
+ {
33
+ #endif
34
+
35
+ #if !defined( __CC_ARM )
36
+
37
+ /**
38
+ * @brief 16-bit floating-point type definition.
39
+ * This is already defined in arm_mve.h
40
+ *
41
+ * This is not fully supported on ARM AC5.
42
+ */
43
+
44
+ /*
45
+
46
+ Check if the type __fp16 is available.
47
+ If it is not available, f16 version of the kernels
48
+ won't be built.
49
+
50
+ */
51
+ #if !(__ARM_FEATURE_MVE & 2)
52
+ #if !defined(DISABLEFLOAT16)
53
+ #if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
54
+ typedef __fp16 float16_t;
55
+ #define ARM_FLOAT16_SUPPORTED
56
+ #endif
57
+ #endif
58
+ #else
59
+ /* When Vector float16, this flag is always defined and can't be disabled */
60
+ #define ARM_FLOAT16_SUPPORTED
61
+ #endif
62
+
63
+ #if defined(ARM_MATH_NEON) || (defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)) /* floating point vector*/
64
+
65
+ #if defined(ARM_MATH_MVE_FLOAT16) || defined(ARM_MATH_NEON_FLOAT16)
66
+
67
+ /**
68
+ * @brief 16-bit floating-point 128-bit vector data type
69
+ */
70
+ typedef __ALIGNED(2) float16x8_t f16x8_t;
71
+
72
+ /**
73
+ * @brief 16-bit floating-point 128-bit vector pair data type
74
+ */
75
+ typedef float16x8x2_t f16x8x2_t;
76
+
77
+ /**
78
+ * @brief 16-bit floating-point 128-bit vector quadruplet data type
79
+ */
80
+ typedef float16x8x4_t f16x8x4_t;
81
+
82
+ /**
83
+ * @brief 16-bit ubiquitous 128-bit vector data type
84
+ */
85
+ typedef union _any16x8_t
86
+ {
87
+ float16x8_t f;
88
+ int16x8_t i;
89
+ } any16x8_t;
90
+ #endif
91
+
92
+ #endif
93
+
94
+ #if defined(ARM_MATH_NEON)
95
+
96
+
97
+ #if defined(ARM_MATH_NEON_FLOAT16)
98
+ /**
99
+ * @brief 16-bit float 64-bit vector data type.
100
+ */
101
+ typedef __ALIGNED(2) float16x4_t f16x4_t;
102
+
103
+ /**
104
+ * @brief 16-bit floating-point 128-bit vector triplet data type
105
+ */
106
+ typedef float16x8x3_t f16x8x3_t;
107
+
108
+ /**
109
+ * @brief 16-bit floating-point 64-bit vector pair data type
110
+ */
111
+ typedef float16x4x2_t f16x4x2_t;
112
+
113
+ /**
114
+ * @brief 16-bit floating-point 64-bit vector triplet data type
115
+ */
116
+ typedef float16x4x3_t f16x4x3_t;
117
+
118
+ /**
119
+ * @brief 16-bit floating-point 64-bit vector quadruplet data type
120
+ */
121
+ typedef float16x4x4_t f16x4x4_t;
122
+
123
+ /**
124
+ * @brief 16-bit ubiquitous 64-bit vector data type
125
+ */
126
+ typedef union _any16x4_t
127
+ {
128
+ float16x4_t f;
129
+ int16x4_t i;
130
+ } any16x4_t;
131
+ #endif
132
+
133
+ #endif
134
+
135
+
136
+
137
+ #if defined(ARM_FLOAT16_SUPPORTED)
138
+ #define F16_MAX ((float16_t)__FLT16_MAX__)
139
+ #define F16_MIN (-(float16_t)__FLT16_MAX__)
140
+
141
+ #define F16_ABSMAX ((float16_t)__FLT16_MAX__)
142
+ #define F16_ABSMIN ((float16_t)0.0f16)
143
+
144
+ #define F16INFINITY ((float16_t)__builtin_inf())
145
+
146
+ #endif /* ARM_FLOAT16_SUPPORTED*/
147
+ #endif /* !defined( __CC_ARM ) */
148
+
149
+ #ifdef __cplusplus
150
+ }
151
+ #endif
152
+
153
+ #endif /* _ARM_MATH_F16_H */
154
+
155
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables.h ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_mve_tables.h
4
+ * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc
5
+ * used for MVE implementation only
6
+ *
7
+ * $Date: 14. April 2020
8
+ *
9
+ * Target Processor: Cortex-M cores
10
+ * -------------------------------------------------------------------- */
11
+ /*
12
+ * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved.
13
+ *
14
+ * SPDX-License-Identifier: Apache-2.0
15
+ *
16
+ * Licensed under the Apache License, Version 2.0 (the License); you may
17
+ * not use this file except in compliance with the License.
18
+ * You may obtain a copy of the License at
19
+ *
20
+ * www.apache.org/licenses/LICENSE-2.0
21
+ *
22
+ * Unless required by applicable law or agreed to in writing, software
23
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ * See the License for the specific language governing permissions and
26
+ * limitations under the License.
27
+ */
28
+
29
+ #ifndef _ARM_MVE_TABLES_H
30
+ #define _ARM_MVE_TABLES_H
31
+
32
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
33
+
34
+ #ifdef __cplusplus
35
+ extern "C"
36
+ {
37
+ #endif
38
+
39
+
40
+
41
+
42
+ #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
43
+
44
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES)
45
+
46
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_16) || defined(ARM_TABLE_TWIDDLECOEF_F32_32)
47
+
48
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_16_f32[2];
49
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_16_f32[2];
50
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_16_f32[2];
51
+ extern float32_t rearranged_twiddle_stride1_16_f32[8];
52
+ extern float32_t rearranged_twiddle_stride2_16_f32[8];
53
+ extern float32_t rearranged_twiddle_stride3_16_f32[8];
54
+ #endif
55
+
56
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_64) || defined(ARM_TABLE_TWIDDLECOEF_F32_128)
57
+
58
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_64_f32[3];
59
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_64_f32[3];
60
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_64_f32[3];
61
+ extern float32_t rearranged_twiddle_stride1_64_f32[40];
62
+ extern float32_t rearranged_twiddle_stride2_64_f32[40];
63
+ extern float32_t rearranged_twiddle_stride3_64_f32[40];
64
+ #endif
65
+
66
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_256) || defined(ARM_TABLE_TWIDDLECOEF_F32_512)
67
+
68
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_256_f32[4];
69
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_256_f32[4];
70
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_256_f32[4];
71
+ extern float32_t rearranged_twiddle_stride1_256_f32[168];
72
+ extern float32_t rearranged_twiddle_stride2_256_f32[168];
73
+ extern float32_t rearranged_twiddle_stride3_256_f32[168];
74
+ #endif
75
+
76
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_1024) || defined(ARM_TABLE_TWIDDLECOEF_F32_2048)
77
+
78
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_f32[5];
79
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_f32[5];
80
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_f32[5];
81
+ extern float32_t rearranged_twiddle_stride1_1024_f32[680];
82
+ extern float32_t rearranged_twiddle_stride2_1024_f32[680];
83
+ extern float32_t rearranged_twiddle_stride3_1024_f32[680];
84
+ #endif
85
+
86
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_4096) || defined(ARM_TABLE_TWIDDLECOEF_F32_8192)
87
+
88
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_f32[6];
89
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_f32[6];
90
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_f32[6];
91
+ extern float32_t rearranged_twiddle_stride1_4096_f32[2728];
92
+ extern float32_t rearranged_twiddle_stride2_4096_f32[2728];
93
+ extern float32_t rearranged_twiddle_stride3_4096_f32[2728];
94
+ #endif
95
+
96
+
97
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */
98
+
99
+ #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
100
+
101
+
102
+
103
+ #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
104
+
105
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES)
106
+
107
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_16) || defined(ARM_TABLE_TWIDDLECOEF_Q31_32)
108
+
109
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_16_q31[2];
110
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_16_q31[2];
111
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_16_q31[2];
112
+ extern q31_t rearranged_twiddle_stride1_16_q31[8];
113
+ extern q31_t rearranged_twiddle_stride2_16_q31[8];
114
+ extern q31_t rearranged_twiddle_stride3_16_q31[8];
115
+ #endif
116
+
117
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_64) || defined(ARM_TABLE_TWIDDLECOEF_Q31_128)
118
+
119
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_64_q31[3];
120
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_64_q31[3];
121
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_64_q31[3];
122
+ extern q31_t rearranged_twiddle_stride1_64_q31[40];
123
+ extern q31_t rearranged_twiddle_stride2_64_q31[40];
124
+ extern q31_t rearranged_twiddle_stride3_64_q31[40];
125
+ #endif
126
+
127
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_256) || defined(ARM_TABLE_TWIDDLECOEF_Q31_512)
128
+
129
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_256_q31[4];
130
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_256_q31[4];
131
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_256_q31[4];
132
+ extern q31_t rearranged_twiddle_stride1_256_q31[168];
133
+ extern q31_t rearranged_twiddle_stride2_256_q31[168];
134
+ extern q31_t rearranged_twiddle_stride3_256_q31[168];
135
+ #endif
136
+
137
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_1024) || defined(ARM_TABLE_TWIDDLECOEF_Q31_2048)
138
+
139
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_q31[5];
140
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_q31[5];
141
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_q31[5];
142
+ extern q31_t rearranged_twiddle_stride1_1024_q31[680];
143
+ extern q31_t rearranged_twiddle_stride2_1024_q31[680];
144
+ extern q31_t rearranged_twiddle_stride3_1024_q31[680];
145
+ #endif
146
+
147
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_4096) || defined(ARM_TABLE_TWIDDLECOEF_Q31_8192)
148
+
149
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_q31[6];
150
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_q31[6];
151
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_q31[6];
152
+ extern q31_t rearranged_twiddle_stride1_4096_q31[2728];
153
+ extern q31_t rearranged_twiddle_stride2_4096_q31[2728];
154
+ extern q31_t rearranged_twiddle_stride3_4096_q31[2728];
155
+ #endif
156
+
157
+
158
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */
159
+
160
+ #endif /* defined(ARM_MATH_MVEI) */
161
+
162
+
163
+
164
+ #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
165
+
166
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES)
167
+
168
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_16) || defined(ARM_TABLE_TWIDDLECOEF_Q15_32)
169
+
170
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_16_q15[2];
171
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_16_q15[2];
172
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_16_q15[2];
173
+ extern q15_t rearranged_twiddle_stride1_16_q15[8];
174
+ extern q15_t rearranged_twiddle_stride2_16_q15[8];
175
+ extern q15_t rearranged_twiddle_stride3_16_q15[8];
176
+ #endif
177
+
178
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_64) || defined(ARM_TABLE_TWIDDLECOEF_Q15_128)
179
+
180
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_64_q15[3];
181
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_64_q15[3];
182
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_64_q15[3];
183
+ extern q15_t rearranged_twiddle_stride1_64_q15[40];
184
+ extern q15_t rearranged_twiddle_stride2_64_q15[40];
185
+ extern q15_t rearranged_twiddle_stride3_64_q15[40];
186
+ #endif
187
+
188
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_256) || defined(ARM_TABLE_TWIDDLECOEF_Q15_512)
189
+
190
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_256_q15[4];
191
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_256_q15[4];
192
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_256_q15[4];
193
+ extern q15_t rearranged_twiddle_stride1_256_q15[168];
194
+ extern q15_t rearranged_twiddle_stride2_256_q15[168];
195
+ extern q15_t rearranged_twiddle_stride3_256_q15[168];
196
+ #endif
197
+
198
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_1024) || defined(ARM_TABLE_TWIDDLECOEF_Q15_2048)
199
+
200
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_q15[5];
201
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_q15[5];
202
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_q15[5];
203
+ extern q15_t rearranged_twiddle_stride1_1024_q15[680];
204
+ extern q15_t rearranged_twiddle_stride2_1024_q15[680];
205
+ extern q15_t rearranged_twiddle_stride3_1024_q15[680];
206
+ #endif
207
+
208
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_4096) || defined(ARM_TABLE_TWIDDLECOEF_Q15_8192)
209
+
210
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_q15[6];
211
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_q15[6];
212
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_q15[6];
213
+ extern q15_t rearranged_twiddle_stride1_4096_q15[2728];
214
+ extern q15_t rearranged_twiddle_stride2_4096_q15[2728];
215
+ extern q15_t rearranged_twiddle_stride3_4096_q15[2728];
216
+ #endif
217
+
218
+
219
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */
220
+
221
+ #endif /* defined(ARM_MATH_MVEI) */
222
+
223
+
224
+
225
+ #ifdef __cplusplus
226
+ }
227
+ #endif
228
+
229
+ #endif /*_ARM_MVE_TABLES_H*/
230
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables_f16.h ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------------------------------------------------------------
2
+ * Project: CMSIS DSP Library
3
+ * Title: arm_mve_tables_f16.h
4
+ * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc
5
+ * used for MVE implementation only
6
+ *
7
+ * $Date: 14. April 2020
8
+ *
9
+ * Target Processor: Cortex-M cores
10
+ * -------------------------------------------------------------------- */
11
+ /*
12
+ * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved.
13
+ *
14
+ * SPDX-License-Identifier: Apache-2.0
15
+ *
16
+ * Licensed under the Apache License, Version 2.0 (the License); you may
17
+ * not use this file except in compliance with the License.
18
+ * You may obtain a copy of the License at
19
+ *
20
+ * www.apache.org/licenses/LICENSE-2.0
21
+ *
22
+ * Unless required by applicable law or agreed to in writing, software
23
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
24
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ * See the License for the specific language governing permissions and
26
+ * limitations under the License.
27
+ */
28
+
29
+ #ifndef _ARM_MVE_TABLES_F16_H
30
+ #define _ARM_MVE_TABLES_F16_H
31
+
32
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h"
33
+
34
+ #ifdef __cplusplus
35
+ extern "C"
36
+ {
37
+ #endif
38
+
39
+
40
+
41
+
42
+ #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
43
+
44
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES)
45
+
46
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_16) || defined(ARM_TABLE_TWIDDLECOEF_F16_32)
47
+
48
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_16_f16[2];
49
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_16_f16[2];
50
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_16_f16[2];
51
+ extern float16_t rearranged_twiddle_stride1_16_f16[8];
52
+ extern float16_t rearranged_twiddle_stride2_16_f16[8];
53
+ extern float16_t rearranged_twiddle_stride3_16_f16[8];
54
+ #endif
55
+
56
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_64) || defined(ARM_TABLE_TWIDDLECOEF_F16_128)
57
+
58
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_64_f16[3];
59
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_64_f16[3];
60
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_64_f16[3];
61
+ extern float16_t rearranged_twiddle_stride1_64_f16[40];
62
+ extern float16_t rearranged_twiddle_stride2_64_f16[40];
63
+ extern float16_t rearranged_twiddle_stride3_64_f16[40];
64
+ #endif
65
+
66
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_256) || defined(ARM_TABLE_TWIDDLECOEF_F16_512)
67
+
68
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_256_f16[4];
69
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_256_f16[4];
70
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_256_f16[4];
71
+ extern float16_t rearranged_twiddle_stride1_256_f16[168];
72
+ extern float16_t rearranged_twiddle_stride2_256_f16[168];
73
+ extern float16_t rearranged_twiddle_stride3_256_f16[168];
74
+ #endif
75
+
76
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_1024) || defined(ARM_TABLE_TWIDDLECOEF_F16_2048)
77
+
78
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_f16[5];
79
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_f16[5];
80
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_f16[5];
81
+ extern float16_t rearranged_twiddle_stride1_1024_f16[680];
82
+ extern float16_t rearranged_twiddle_stride2_1024_f16[680];
83
+ extern float16_t rearranged_twiddle_stride3_1024_f16[680];
84
+ #endif
85
+
86
+ #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F16_4096) || defined(ARM_TABLE_TWIDDLECOEF_F16_8192)
87
+
88
+ extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_f16[6];
89
+ extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_f16[6];
90
+ extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_f16[6];
91
+ extern float16_t rearranged_twiddle_stride1_4096_f16[2728];
92
+ extern float16_t rearranged_twiddle_stride2_4096_f16[2728];
93
+ extern float16_t rearranged_twiddle_stride3_4096_f16[2728];
94
+ #endif
95
+
96
+
97
+ #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */
98
+
99
+ #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
100
+
101
+
102
+
103
+ #ifdef __cplusplus
104
+ }
105
+ #endif
106
+
107
+ #endif /*_ARM_MVE_TABLES_F16_H*/
108
+
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_sorting.h ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_sorting.h
3
+ * @brief Private header file for CMSIS DSP Library
4
+ * @version V1.7.0
5
+ * @date 2019
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_SORTING_H_
26
+ #define _ARM_SORTING_H_
27
+
28
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h"
29
+
30
+ #ifdef __cplusplus
31
+ extern "C"
32
+ {
33
+ #endif
34
+
35
+ /**
36
+ * @param[in] S points to an instance of the sorting structure.
37
+ * @param[in] pSrc points to the block of input data.
38
+ * @param[out] pDst points to the block of output data.
39
+ * @param[in] blockSize number of samples to process.
40
+ */
41
+ void arm_bubble_sort_f32(
42
+ const arm_sort_instance_f32 * S,
43
+ float32_t * pSrc,
44
+ float32_t * pDst,
45
+ uint32_t blockSize);
46
+
47
+ /**
48
+ * @param[in] S points to an instance of the sorting structure.
49
+ * @param[in] pSrc points to the block of input data.
50
+ * @param[out] pDst points to the block of output data.
51
+ * @param[in] blockSize number of samples to process.
52
+ */
53
+ void arm_heap_sort_f32(
54
+ const arm_sort_instance_f32 * S,
55
+ float32_t * pSrc,
56
+ float32_t * pDst,
57
+ uint32_t blockSize);
58
+
59
+ /**
60
+ * @param[in] S points to an instance of the sorting structure.
61
+ * @param[in] pSrc points to the block of input data.
62
+ * @param[out] pDst points to the block of output data.
63
+ * @param[in] blockSize number of samples to process.
64
+ */
65
+ void arm_insertion_sort_f32(
66
+ const arm_sort_instance_f32 * S,
67
+ float32_t *pSrc,
68
+ float32_t* pDst,
69
+ uint32_t blockSize);
70
+
71
+ /**
72
+ * @param[in] S points to an instance of the sorting structure.
73
+ * @param[in] pSrc points to the block of input data.
74
+ * @param[out] pDst points to the block of output data
75
+ * @param[in] blockSize number of samples to process.
76
+ */
77
+ void arm_quick_sort_f32(
78
+ const arm_sort_instance_f32 * S,
79
+ float32_t * pSrc,
80
+ float32_t * pDst,
81
+ uint32_t blockSize);
82
+
83
+ /**
84
+ * @param[in] S points to an instance of the sorting structure.
85
+ * @param[in] pSrc points to the block of input data.
86
+ * @param[out] pDst points to the block of output data
87
+ * @param[in] blockSize number of samples to process.
88
+ */
89
+ void arm_selection_sort_f32(
90
+ const arm_sort_instance_f32 * S,
91
+ float32_t * pSrc,
92
+ float32_t * pDst,
93
+ uint32_t blockSize);
94
+
95
+ /**
96
+ * @param[in] S points to an instance of the sorting structure.
97
+ * @param[in] pSrc points to the block of input data.
98
+ * @param[out] pDst points to the block of output data
99
+ * @param[in] blockSize number of samples to process.
100
+ */
101
+ void arm_bitonic_sort_f32(
102
+ const arm_sort_instance_f32 * S,
103
+ float32_t * pSrc,
104
+ float32_t * pDst,
105
+ uint32_t blockSize);
106
+
107
+ #if defined(ARM_MATH_NEON)
108
+
109
+ #define vtrn256_128q(a, b) \
110
+ do { \
111
+ float32x4_t vtrn128_temp = a.val[1]; \
112
+ a.val[1] = b.val[0]; \
113
+ b.val[0] = vtrn128_temp ; \
114
+ } while (0)
115
+
116
+ #define vtrn128_64q(a, b) \
117
+ do { \
118
+ float32x2_t ab, cd, ef, gh; \
119
+ ab = vget_low_f32(a); \
120
+ ef = vget_low_f32(b); \
121
+ cd = vget_high_f32(a); \
122
+ gh = vget_high_f32(b); \
123
+ a = vcombine_f32(ab, ef); \
124
+ b = vcombine_f32(cd, gh); \
125
+ } while (0)
126
+
127
+ #define vtrn256_64q(a, b) \
128
+ do { \
129
+ float32x2_t a_0, a_1, a_2, a_3; \
130
+ float32x2_t b_0, b_1, b_2, b_3; \
131
+ a_0 = vget_low_f32(a.val[0]); \
132
+ a_1 = vget_high_f32(a.val[0]); \
133
+ a_2 = vget_low_f32(a.val[1]); \
134
+ a_3 = vget_high_f32(a.val[1]); \
135
+ b_0 = vget_low_f32(b.val[0]); \
136
+ b_1 = vget_high_f32(b.val[0]); \
137
+ b_2 = vget_low_f32(b.val[1]); \
138
+ b_3 = vget_high_f32(b.val[1]); \
139
+ a.val[0] = vcombine_f32(a_0, b_0); \
140
+ a.val[1] = vcombine_f32(a_2, b_2); \
141
+ b.val[0] = vcombine_f32(a_1, b_1); \
142
+ b.val[1] = vcombine_f32(a_3, b_3); \
143
+ } while (0)
144
+
145
+ #define vtrn128_32q(a, b) \
146
+ do { \
147
+ float32x4x2_t vtrn32_tmp = vtrnq_f32((a), (b)); \
148
+ (a) = vtrn32_tmp.val[0]; \
149
+ (b) = vtrn32_tmp.val[1]; \
150
+ } while (0)
151
+
152
+ #define vtrn256_32q(a, b) \
153
+ do { \
154
+ float32x4x2_t vtrn32_tmp_1 = vtrnq_f32((a.val[0]), (b.val[0])); \
155
+ float32x4x2_t vtrn32_tmp_2 = vtrnq_f32((a.val[1]), (b.val[1])); \
156
+ a.val[0] = vtrn32_tmp_1.val[0]; \
157
+ a.val[1] = vtrn32_tmp_2.val[0]; \
158
+ b.val[0] = vtrn32_tmp_1.val[1]; \
159
+ b.val[1] = vtrn32_tmp_2.val[1]; \
160
+ } while (0)
161
+
162
+ #define vminmaxq(a, b) \
163
+ do { \
164
+ float32x4_t minmax_tmp = (a); \
165
+ (a) = vminq_f32((a), (b)); \
166
+ (b) = vmaxq_f32(minmax_tmp, (b)); \
167
+ } while (0)
168
+
169
+ #define vminmax256q(a, b) \
170
+ do { \
171
+ float32x4x2_t minmax256_tmp = (a); \
172
+ a.val[0] = vminq_f32(a.val[0], b.val[0]); \
173
+ a.val[1] = vminq_f32(a.val[1], b.val[1]); \
174
+ b.val[0] = vmaxq_f32(minmax256_tmp.val[0], b.val[0]); \
175
+ b.val[1] = vmaxq_f32(minmax256_tmp.val[1], b.val[1]); \
176
+ } while (0)
177
+
178
+ #define vrev128q_f32(a) \
179
+ vcombine_f32(vrev64_f32(vget_high_f32(a)), vrev64_f32(vget_low_f32(a)))
180
+
181
+ #define vrev256q_f32(a) \
182
+ do { \
183
+ float32x4_t rev_tmp = vcombine_f32(vrev64_f32(vget_high_f32(a.val[0])), vrev64_f32(vget_low_f32(a.val[0]))); \
184
+ a.val[0] = vcombine_f32(vrev64_f32(vget_high_f32(a.val[1])), vrev64_f32(vget_low_f32(a.val[1]))); \
185
+ a.val[1] = rev_tmp; \
186
+ } while (0)
187
+
188
+ #define vldrev128q_f32(a, p) \
189
+ do { \
190
+ a = vld1q_f32(p); \
191
+ a = vrev128q_f32(a); \
192
+ } while (0)
193
+
194
+ #endif /* ARM_MATH_NEON */
195
+
196
+ #ifdef __cplusplus
197
+ }
198
+ #endif
199
+
200
+ #endif /* _ARM_SORTING_H */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_vec_fft.h
3
+ * @brief Private header file for CMSIS DSP Library
4
+ * @version V1.7.0
5
+ * @date 07. January 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_VEC_FFT_H_
26
+ #define _ARM_VEC_FFT_H_
27
+
28
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h"
29
+ #include "arm_helium_utils.h"
30
+
31
+ #ifdef __cplusplus
32
+ extern "C"
33
+ {
34
+ #endif
35
+
36
+ #if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
37
+
38
+ #define MVE_CMPLX_ADD_A_ixB(A, B) vcaddq_rot90(A,B)
39
+ #define MVE_CMPLX_SUB_A_ixB(A,B) vcaddq_rot270(A,B)
40
+ #define MVE_CMPLX_MULT_FLT_AxB(A,B) vcmlaq_rot90(vcmulq(A, B), A, B)
41
+ #define MVE_CMPLX_MULT_FLT_Conj_AxB(A,B) vcmlaq_rot270(vcmulq(A, B), A, B)
42
+
43
+ #define MVE_CMPLX_MULT_FX_AxB(A,B) vqdmladhxq(vqdmlsdhq((__typeof(A))vuninitializedq_s32(), A, B), A, B)
44
+ #define MVE_CMPLX_MULT_FX_AxConjB(A,B) vqdmladhq(vqdmlsdhxq((__typeof(A))vuninitializedq_s32(), A, B), A, B)
45
+
46
+ #define MVE_CMPLX_ADD_FX_A_ixB(A, B) vhcaddq_rot90(A,B)
47
+ #define MVE_CMPLX_SUB_FX_A_ixB(A,B) vhcaddq_rot270(A,B)
48
+
49
+
50
+ #endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)*/
51
+
52
+
53
+ #ifdef __cplusplus
54
+ }
55
+ #endif
56
+
57
+
58
+ #endif /* _ARM_VEC_FFT_H_ */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_filtering.h ADDED
The diff for this file is too large to render. See raw diff
 
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_vec_math.h
3
+ * @brief Public header file for CMSIS DSP Library
4
+ * @version V1.7.0
5
+ * @date 15. October 2019
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+ #ifndef _ARM_VEC_MATH_H
26
+ #define _ARM_VEC_MATH_H
27
+
28
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
29
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h"
30
+ #include "arm_helium_utils.h"
31
+
32
+ #ifdef __cplusplus
33
+ extern "C"
34
+ {
35
+ #endif
36
+
37
+ #if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
38
+
39
+ #define INV_NEWTON_INIT_F32 0x7EF127EA
40
+
41
+ static const float32_t __logf_rng_f32=0.693147180f;
42
+
43
+
44
+ /* fast inverse approximation (3x newton) */
45
+ __STATIC_INLINE f32x4_t vrecip_medprec_f32(
46
+ f32x4_t x)
47
+ {
48
+ q31x4_t m;
49
+ f32x4_t b;
50
+ any32x4_t xinv;
51
+ f32x4_t ax = vabsq(x);
52
+
53
+ xinv.f = ax;
54
+ m = 0x3F800000 - (xinv.i & 0x7F800000);
55
+ xinv.i = xinv.i + m;
56
+ xinv.f = 1.41176471f - 0.47058824f * xinv.f;
57
+ xinv.i = xinv.i + m;
58
+
59
+ b = 2.0f - xinv.f * ax;
60
+ xinv.f = xinv.f * b;
61
+
62
+ b = 2.0f - xinv.f * ax;
63
+ xinv.f = xinv.f * b;
64
+
65
+ b = 2.0f - xinv.f * ax;
66
+ xinv.f = xinv.f * b;
67
+
68
+ xinv.f = vdupq_m(xinv.f, INFINITY, vcmpeqq(x, 0.0f));
69
+ /*
70
+ * restore sign
71
+ */
72
+ xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f));
73
+
74
+ return xinv.f;
75
+ }
76
+
77
+ /* fast inverse approximation (4x newton) */
78
+ __STATIC_INLINE f32x4_t vrecip_hiprec_f32(
79
+ f32x4_t x)
80
+ {
81
+ q31x4_t m;
82
+ f32x4_t b;
83
+ any32x4_t xinv;
84
+ f32x4_t ax = vabsq(x);
85
+
86
+ xinv.f = ax;
87
+
88
+ m = 0x3F800000 - (xinv.i & 0x7F800000);
89
+ xinv.i = xinv.i + m;
90
+ xinv.f = 1.41176471f - 0.47058824f * xinv.f;
91
+ xinv.i = xinv.i + m;
92
+
93
+ b = 2.0f - xinv.f * ax;
94
+ xinv.f = xinv.f * b;
95
+
96
+ b = 2.0f - xinv.f * ax;
97
+ xinv.f = xinv.f * b;
98
+
99
+ b = 2.0f - xinv.f * ax;
100
+ xinv.f = xinv.f * b;
101
+
102
+ b = 2.0f - xinv.f * ax;
103
+ xinv.f = xinv.f * b;
104
+
105
+ xinv.f = vdupq_m(xinv.f, INFINITY, vcmpeqq(x, 0.0f));
106
+ /*
107
+ * restore sign
108
+ */
109
+ xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f));
110
+
111
+ return xinv.f;
112
+ }
113
+
114
+ __STATIC_INLINE f32x4_t vdiv_f32(
115
+ f32x4_t num, f32x4_t den)
116
+ {
117
+ return vmulq(num, vrecip_hiprec_f32(den));
118
+ }
119
+
120
+ /**
121
+ @brief Single-precision taylor dev.
122
+ @param[in] x f32 quad vector input
123
+ @param[in] coeffs f32 quad vector coeffs
124
+ @return destination f32 quad vector
125
+ */
126
+
127
+ __STATIC_INLINE f32x4_t vtaylor_polyq_f32(
128
+ f32x4_t x,
129
+ const float32_t * coeffs)
130
+ {
131
+ f32x4_t A = vfmasq(vdupq_n_f32(coeffs[4]), x, coeffs[0]);
132
+ f32x4_t B = vfmasq(vdupq_n_f32(coeffs[6]), x, coeffs[2]);
133
+ f32x4_t C = vfmasq(vdupq_n_f32(coeffs[5]), x, coeffs[1]);
134
+ f32x4_t D = vfmasq(vdupq_n_f32(coeffs[7]), x, coeffs[3]);
135
+ f32x4_t x2 = vmulq(x, x);
136
+ f32x4_t x4 = vmulq(x2, x2);
137
+ f32x4_t res = vfmaq(vfmaq_f32(A, B, x2), vfmaq_f32(C, D, x2), x4);
138
+
139
+ return res;
140
+ }
141
+
142
+ __STATIC_INLINE f32x4_t vmant_exp_f32(
143
+ f32x4_t x,
144
+ int32x4_t * e)
145
+ {
146
+ any32x4_t r;
147
+ int32x4_t n;
148
+
149
+ r.f = x;
150
+ n = r.i >> 23;
151
+ n = n - 127;
152
+ r.i = r.i - (n << 23);
153
+
154
+ *e = n;
155
+ return r.f;
156
+ }
157
+
158
+
159
+ __STATIC_INLINE f32x4_t vlogq_f32(f32x4_t vecIn)
160
+ {
161
+ q31x4_t vecExpUnBiased;
162
+ f32x4_t vecTmpFlt0, vecTmpFlt1;
163
+ f32x4_t vecAcc0, vecAcc1, vecAcc2, vecAcc3;
164
+ f32x4_t vecExpUnBiasedFlt;
165
+
166
+ /*
167
+ * extract exponent
168
+ */
169
+ vecTmpFlt1 = vmant_exp_f32(vecIn, &vecExpUnBiased);
170
+
171
+ vecTmpFlt0 = vecTmpFlt1 * vecTmpFlt1;
172
+ /*
173
+ * a = (__logf_lut_f32[4] * r.f) + (__logf_lut_f32[0]);
174
+ */
175
+ vecAcc0 = vdupq_n_f32(__logf_lut_f32[0]);
176
+ vecAcc0 = vfmaq(vecAcc0, vecTmpFlt1, __logf_lut_f32[4]);
177
+ /*
178
+ * b = (__logf_lut_f32[6] * r.f) + (__logf_lut_f32[2]);
179
+ */
180
+ vecAcc1 = vdupq_n_f32(__logf_lut_f32[2]);
181
+ vecAcc1 = vfmaq(vecAcc1, vecTmpFlt1, __logf_lut_f32[6]);
182
+ /*
183
+ * c = (__logf_lut_f32[5] * r.f) + (__logf_lut_f32[1]);
184
+ */
185
+ vecAcc2 = vdupq_n_f32(__logf_lut_f32[1]);
186
+ vecAcc2 = vfmaq(vecAcc2, vecTmpFlt1, __logf_lut_f32[5]);
187
+ /*
188
+ * d = (__logf_lut_f32[7] * r.f) + (__logf_lut_f32[3]);
189
+ */
190
+ vecAcc3 = vdupq_n_f32(__logf_lut_f32[3]);
191
+ vecAcc3 = vfmaq(vecAcc3, vecTmpFlt1, __logf_lut_f32[7]);
192
+ /*
193
+ * a = a + b * xx;
194
+ */
195
+ vecAcc0 = vfmaq(vecAcc0, vecAcc1, vecTmpFlt0);
196
+ /*
197
+ * c = c + d * xx;
198
+ */
199
+ vecAcc2 = vfmaq(vecAcc2, vecAcc3, vecTmpFlt0);
200
+ /*
201
+ * xx = xx * xx;
202
+ */
203
+ vecTmpFlt0 = vecTmpFlt0 * vecTmpFlt0;
204
+ vecExpUnBiasedFlt = vcvtq_f32_s32(vecExpUnBiased);
205
+ /*
206
+ * r.f = a + c * xx;
207
+ */
208
+ vecAcc0 = vfmaq(vecAcc0, vecAcc2, vecTmpFlt0);
209
+ /*
210
+ * add exponent
211
+ * r.f = r.f + ((float32_t) m) * __logf_rng_f32;
212
+ */
213
+ vecAcc0 = vfmaq(vecAcc0, vecExpUnBiasedFlt, __logf_rng_f32);
214
+ // set log0 down to -inf
215
+ vecAcc0 = vdupq_m(vecAcc0, -INFINITY, vcmpeqq(vecIn, 0.0f));
216
+ return vecAcc0;
217
+ }
218
+
219
+ __STATIC_INLINE f32x4_t vexpq_f32(
220
+ f32x4_t x)
221
+ {
222
+ // Perform range reduction [-log(2),log(2)]
223
+ int32x4_t m = vcvtq_s32_f32(vmulq_n_f32(x, 1.4426950408f));
224
+ f32x4_t val = vfmsq_f32(x, vcvtq_f32_s32(m), vdupq_n_f32(0.6931471805f));
225
+
226
+ // Polynomial Approximation
227
+ f32x4_t poly = vtaylor_polyq_f32(val, exp_tab);
228
+
229
+ // Reconstruct
230
+ poly = (f32x4_t) (vqaddq_s32((q31x4_t) (poly), vqshlq_n_s32(m, 23)));
231
+
232
+ poly = vdupq_m(poly, 0.0f, vcmpltq_n_s32(m, -126));
233
+ return poly;
234
+ }
235
+
236
+ __STATIC_INLINE f32x4_t arm_vec_exponent_f32(f32x4_t x, int32_t nb)
237
+ {
238
+ f32x4_t r = x;
239
+ nb--;
240
+ while (nb > 0) {
241
+ r = vmulq(r, x);
242
+ nb--;
243
+ }
244
+ return (r);
245
+ }
246
+
247
+ __STATIC_INLINE f32x4_t vrecip_f32(f32x4_t vecIn)
248
+ {
249
+ f32x4_t vecSx, vecW, vecTmp;
250
+ any32x4_t v;
251
+
252
+ vecSx = vabsq(vecIn);
253
+
254
+ v.f = vecIn;
255
+ v.i = vsubq(vdupq_n_s32(INV_NEWTON_INIT_F32), v.i);
256
+
257
+ vecW = vmulq(vecSx, v.f);
258
+
259
+ // v.f = v.f * (8 + w * (-28 + w * (56 + w * (-70 + w *(56 + w * (-28 + w * (8 - w)))))));
260
+ vecTmp = vsubq(vdupq_n_f32(8.0f), vecW);
261
+ vecTmp = vfmasq(vecW, vecTmp, -28.0f);
262
+ vecTmp = vfmasq(vecW, vecTmp, 56.0f);
263
+ vecTmp = vfmasq(vecW, vecTmp, -70.0f);
264
+ vecTmp = vfmasq(vecW, vecTmp, 56.0f);
265
+ vecTmp = vfmasq(vecW, vecTmp, -28.0f);
266
+ vecTmp = vfmasq(vecW, vecTmp, 8.0f);
267
+ v.f = vmulq(v.f, vecTmp);
268
+
269
+ v.f = vdupq_m(v.f, INFINITY, vcmpeqq(vecIn, 0.0f));
270
+ /*
271
+ * restore sign
272
+ */
273
+ v.f = vnegq_m(v.f, v.f, vcmpltq(vecIn, 0.0f));
274
+ return v.f;
275
+ }
276
+
277
+ __STATIC_INLINE f32x4_t vtanhq_f32(
278
+ f32x4_t val)
279
+ {
280
+ f32x4_t x =
281
+ vminnmq_f32(vmaxnmq_f32(val, vdupq_n_f32(-10.f)), vdupq_n_f32(10.0f));
282
+ f32x4_t exp2x = vexpq_f32(vmulq_n_f32(x, 2.f));
283
+ f32x4_t num = vsubq_n_f32(exp2x, 1.f);
284
+ f32x4_t den = vaddq_n_f32(exp2x, 1.f);
285
+ f32x4_t tanh = vmulq_f32(num, vrecip_f32(den));
286
+ return tanh;
287
+ }
288
+
289
+ __STATIC_INLINE f32x4_t vpowq_f32(
290
+ f32x4_t val,
291
+ f32x4_t n)
292
+ {
293
+ return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
294
+ }
295
+
296
+ #endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)*/
297
+
298
+ #if (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
299
+ #endif /* (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) */
300
+
301
+ #if (defined(ARM_MATH_NEON) || defined(ARM_MATH_NEON_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE)
302
+
303
+ #include "NEMath.h"
304
+ /**
305
+ * @brief Vectorized integer exponentiation
306
+ * @param[in] x value
307
+ * @param[in] nb integer exponent >= 1
308
+ * @return x^nb
309
+ *
310
+ */
311
+ __STATIC_INLINE float32x4_t arm_vec_exponent_f32(float32x4_t x, int32_t nb)
312
+ {
313
+ float32x4_t r = x;
314
+ nb --;
315
+ while(nb > 0)
316
+ {
317
+ r = vmulq_f32(r , x);
318
+ nb--;
319
+ }
320
+ return(r);
321
+ }
322
+
323
+
324
+ __STATIC_INLINE float32x4_t __arm_vec_sqrt_f32_neon(float32x4_t x)
325
+ {
326
+ float32x4_t x1 = vmaxq_f32(x, vdupq_n_f32(FLT_MIN));
327
+ float32x4_t e = vrsqrteq_f32(x1);
328
+ e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e);
329
+ e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e);
330
+ return vmulq_f32(x, e);
331
+ }
332
+
333
+ __STATIC_INLINE int16x8_t __arm_vec_sqrt_q15_neon(int16x8_t vec)
334
+ {
335
+ float32x4_t tempF;
336
+ int32x4_t tempHI,tempLO;
337
+
338
+ tempLO = vmovl_s16(vget_low_s16(vec));
339
+ tempF = vcvtq_n_f32_s32(tempLO,15);
340
+ tempF = __arm_vec_sqrt_f32_neon(tempF);
341
+ tempLO = vcvtq_n_s32_f32(tempF,15);
342
+
343
+ tempHI = vmovl_s16(vget_high_s16(vec));
344
+ tempF = vcvtq_n_f32_s32(tempHI,15);
345
+ tempF = __arm_vec_sqrt_f32_neon(tempF);
346
+ tempHI = vcvtq_n_s32_f32(tempF,15);
347
+
348
+ return(vcombine_s16(vqmovn_s32(tempLO),vqmovn_s32(tempHI)));
349
+ }
350
+
351
+ __STATIC_INLINE int32x4_t __arm_vec_sqrt_q31_neon(int32x4_t vec)
352
+ {
353
+ float32x4_t temp;
354
+
355
+ temp = vcvtq_n_f32_s32(vec,31);
356
+ temp = __arm_vec_sqrt_f32_neon(temp);
357
+ return(vcvtq_n_s32_f32(temp,31));
358
+ }
359
+
360
+ #endif /* (defined(ARM_MATH_NEON) || defined(ARM_MATH_NEON_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) */
361
+
362
+ #ifdef __cplusplus
363
+ }
364
+ #endif
365
+
366
+
367
+ #endif /* _ARM_VEC_MATH_H */
368
+
369
+ /**
370
+ *
371
+ * End of file.
372
+ */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file arm_vec_math_f16.h
3
+ * @brief Public header file for CMSIS DSP Library
4
+ ******************************************************************************/
5
+ /*
6
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
7
+ *
8
+ * SPDX-License-Identifier: Apache-2.0
9
+ *
10
+ * Licensed under the Apache License, Version 2.0 (the License); you may
11
+ * not use this file except in compliance with the License.
12
+ * You may obtain a copy of the License at
13
+ *
14
+ * www.apache.org/licenses/LICENSE-2.0
15
+ *
16
+ * Unless required by applicable law or agreed to in writing, software
17
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
18
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ * See the License for the specific language governing permissions and
20
+ * limitations under the License.
21
+ */
22
+
23
+ #ifndef _ARM_VEC_MATH_F16_H
24
+ #define _ARM_VEC_MATH_F16_H
25
+
26
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h"
27
+ #include "arm_common_tables_f16.h"
28
+ #include "arm_helium_utils.h"
29
+
30
+ #ifdef __cplusplus
31
+ extern "C"
32
+ {
33
+ #endif
34
+
35
+ #if defined(ARM_FLOAT16_SUPPORTED)
36
+
37
+
38
+ #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
39
+
40
+
41
/* ln(2) in half precision; used by vlogq_f16 to reassemble
   log(x) = log(mantissa) + exponent * ln(2). */
static const float16_t __logf_rng_f16=0.693147180f16;
42
+
43
/* fast inverse approximation (3x newton) */
/**
 * @brief Medium-precision vector reciprocal (three Newton-Raphson steps).
 *
 * The seed estimate is built by manipulating the IEEE-754 binary16 bit
 * pattern: 0x7C00 masks the exponent field and 0x3C00 is the bit
 * pattern of 1.0f16, so m = 0x3C00 - (exponent bits) negates the
 * exponent (reciprocal of a power of two). The linear fit
 * 1.41176471 - 0.47058824*x seeds the mantissa. Each Newton step then
 * computes xinv = xinv * (2 - xinv * |x|).
 *
 * Zero lanes are forced to +infinity; the iteration runs on |x| and the
 * input sign is restored at the end.
 */
__STATIC_INLINE f16x8_t vrecip_medprec_f16(
    f16x8_t x)
{
    q15x8_t         m;
    f16x8_t         b;
    any16x8_t       xinv;
    f16x8_t         ax = vabsq(x);

    xinv.f = ax;

    /* negate the biased exponent: 1/2^e == 2^-e */
    m = 0x03c00 - (xinv.i & 0x07c00);
    xinv.i = xinv.i + m;
    xinv.f = 1.41176471f16 - 0.47058824f16 * xinv.f;
    xinv.i = xinv.i + m;

    /* Newton-Raphson step 1: x' = x * (2 - x * a) */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* step 2 */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* step 3 */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* 1/0 -> +infinity */
    xinv.f = vdupq_m(xinv.f, F16INFINITY, vcmpeqq(x, 0.0f));
    /*
     * restore sign
     */
    xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f));

    return xinv.f;
}
76
+
77
/* fast inverse approximation (4x newton) */
/**
 * @brief High-precision vector reciprocal (four Newton-Raphson steps).
 *
 * Identical scheme to vrecip_medprec_f16 (bit-level exponent negation
 * plus a linear mantissa seed) but with one extra refinement step:
 * xinv = xinv * (2 - xinv * |x|) applied four times.
 *
 * Zero lanes are forced to +infinity; the iteration runs on |x| and the
 * input sign is restored at the end.
 */
__STATIC_INLINE f16x8_t vrecip_hiprec_f16(
    f16x8_t x)
{
    q15x8_t         m;
    f16x8_t         b;
    any16x8_t       xinv;
    f16x8_t         ax = vabsq(x);

    xinv.f = ax;

    /* negate the biased exponent (0x7C00 = exponent mask, 0x3C00 = 1.0f16) */
    m = 0x03c00 - (xinv.i & 0x07c00);
    xinv.i = xinv.i + m;
    xinv.f = 1.41176471f16 - 0.47058824f16 * xinv.f;
    xinv.i = xinv.i + m;

    /* Newton-Raphson step 1: x' = x * (2 - x * a) */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* step 2 */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* step 3 */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* step 4 */
    b = 2.0f16 - xinv.f * ax;
    xinv.f = xinv.f * b;

    /* 1/0 -> +infinity */
    xinv.f = vdupq_m(xinv.f, F16INFINITY, vcmpeqq(x, 0.0f));
    /*
     * restore sign
     */
    xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f));

    return xinv.f;
}
113
+
114
+ __STATIC_INLINE f16x8_t vdiv_f16(
115
+ f16x8_t num, f16x8_t den)
116
+ {
117
+ return vmulq(num, vrecip_hiprec_f16(den));
118
+ }
119
+
120
+
121
/**
  @brief         Polynomial evaluation (Estrin-like scheme), f16.
  @param[in]     x       f16 vector input
  @param[in]     coeffs  pointer to 8 f16 coefficients
  @return        destination f16 vector

  Note the interleaved coefficient ordering: with vfmasq(a,b,c) = a*b+c
  and vfmaq(a,b,c) = a + b*c, the result expands to
    c0 + c4*x + c2*x^2 + c6*x^3 + c1*x^4 + c5*x^5 + c3*x^6 + c7*x^7.
  The coefficient tables used with this routine (e.g. exp_tab_f16) are
  stored in this layout -- do not reorder.
 */
__STATIC_INLINE float16x8_t vtaylor_polyq_f16(
        float16x8_t x,
        const float16_t * coeffs)
{
    /* A = c0 + c4*x, B = c2 + c6*x, C = c1 + c5*x, D = c3 + c7*x */
    float16x8_t A = vfmasq(vdupq_n_f16(coeffs[4]), x, coeffs[0]);
    float16x8_t B = vfmasq(vdupq_n_f16(coeffs[6]), x, coeffs[2]);
    float16x8_t C = vfmasq(vdupq_n_f16(coeffs[5]), x, coeffs[1]);
    float16x8_t D = vfmasq(vdupq_n_f16(coeffs[7]), x, coeffs[3]);
    float16x8_t x2 = vmulq(x, x);
    float16x8_t x4 = vmulq(x2, x2);
    /* res = (A + B*x^2) + (C + D*x^2) * x^4 */
    float16x8_t res = vfmaq(vfmaq_f16(A, B, x2), vfmaq_f16(C, D, x2), x4);

    return res;
}
142
+
143
/**
 * @brief Split each lane into mantissa and unbiased exponent (frexp-like).
 *
 * n = (bits >> 10) - 15 extracts the biased binary16 exponent (bits
 * 10..14, bias 15); subtracting n << 10 from the bit pattern rescales
 * the lane into [1, 2).
 *
 * NOTE(review): the shift does not mask off the sign bit, so the result
 * is only meaningful for non-negative normal inputs (the caller
 * vlogq_f16 only needs that domain) -- confirm before reusing elsewhere.
 *
 * @param[in]  x  input vector
 * @param[out] e  unbiased exponent per lane
 * @return mantissa per lane, scaled into [1, 2)
 */
__STATIC_INLINE float16x8_t vmant_exp_f16(
    float16x8_t x,
    int16x8_t * e)
{
    any16x8_t       r;
    int16x8_t       n;

    r.f = x;
    /* biased exponent, then remove the bias of 15 */
    n = r.i >> 10;
    n = n - 15;
    /* clear the extracted exponent from the bit pattern */
    r.i = r.i - (n << 10);

    *e = n;
    return r.f;
}
158
+
159
+
160
/**
 * @brief Vectorized natural logarithm, f16.
 *
 * Decomposes each lane as x = m * 2^e with m in [1, 2) via
 * vmant_exp_f16, evaluates a polynomial in m (coefficients in
 * __logf_lut_f16, combined Estrin-style below), then adds e * ln(2)
 * (__logf_rng_f16). Lanes equal to zero are set to -infinity.
 * Negative inputs are not handled (log undefined there).
 */
__STATIC_INLINE float16x8_t vlogq_f16(float16x8_t vecIn)
{
    q15x8_t             vecExpUnBiased;
    float16x8_t         vecTmpFlt0, vecTmpFlt1;
    float16x8_t         vecAcc0, vecAcc1, vecAcc2, vecAcc3;
    float16x8_t         vecExpUnBiasedFlt;

    /*
     * extract exponent
     */
    vecTmpFlt1 = vmant_exp_f16(vecIn, &vecExpUnBiased);

    vecTmpFlt0 = vecTmpFlt1 * vecTmpFlt1;
    /*
     * a = (__logf_lut_f16[4] * r.f) + (__logf_lut_f16[0]);
     */
    vecAcc0 = vdupq_n_f16(__logf_lut_f16[0]);
    vecAcc0 = vfmaq(vecAcc0, vecTmpFlt1, __logf_lut_f16[4]);
    /*
     * b = (__logf_lut_f16[6] * r.f) + (__logf_lut_f16[2]);
     */
    vecAcc1 = vdupq_n_f16(__logf_lut_f16[2]);
    vecAcc1 = vfmaq(vecAcc1, vecTmpFlt1, __logf_lut_f16[6]);
    /*
     * c = (__logf_lut_f16[5] * r.f) + (__logf_lut_f16[1]);
     */
    vecAcc2 = vdupq_n_f16(__logf_lut_f16[1]);
    vecAcc2 = vfmaq(vecAcc2, vecTmpFlt1, __logf_lut_f16[5]);
    /*
     * d = (__logf_lut_f16[7] * r.f) + (__logf_lut_f16[3]);
     */
    vecAcc3 = vdupq_n_f16(__logf_lut_f16[3]);
    vecAcc3 = vfmaq(vecAcc3, vecTmpFlt1, __logf_lut_f16[7]);
    /*
     * a = a + b * xx;   (xx = m^2)
     */
    vecAcc0 = vfmaq(vecAcc0, vecAcc1, vecTmpFlt0);
    /*
     * c = c + d * xx;
     */
    vecAcc2 = vfmaq(vecAcc2, vecAcc3, vecTmpFlt0);
    /*
     * xx = xx * xx;    (now m^4)
     */
    vecTmpFlt0 = vecTmpFlt0 * vecTmpFlt0;
    vecExpUnBiasedFlt = vcvtq_f16_s16(vecExpUnBiased);
    /*
     * r.f = a + c * xx;
     */
    vecAcc0 = vfmaq(vecAcc0, vecAcc2, vecTmpFlt0);
    /*
     * add exponent contribution:
     * r.f = r.f + ((float) e) * ln(2)
     */
    vecAcc0 = vfmaq(vecAcc0, vecExpUnBiasedFlt, __logf_rng_f16);
    // set log0 down to -inf
    vecAcc0 = vdupq_m(vecAcc0, -F16INFINITY, vcmpeqq(vecIn, 0.0f));
    return vecAcc0;
}
219
+
220
/**
 * @brief Vectorized exponential, f16.
 *
 * Range reduction: m = int(x * 1/ln(2)) (1.4426950408 = 1/ln 2), then
 * val = x - m*ln(2) so exp(x) = exp(val) * 2^m. A polynomial in val
 * (coefficients exp_tab_f16) approximates exp(val); the 2^m factor is
 * reconstructed by adding m into the binary16 exponent field with a
 * saturating shift into bits 10..14. Lanes with m < -14 underflow
 * below the normal range and are flushed to zero.
 * NOTE(review): the reduction interval depends on the rounding mode of
 * vcvtq_s16_f16 -- assumed as in upstream CMSIS; confirm if reused.
 */
__STATIC_INLINE float16x8_t vexpq_f16(
    float16x8_t x)
{
    // Perform range reduction [-log(2),log(2)]
    int16x8_t m = vcvtq_s16_f16(vmulq_n_f16(x, 1.4426950408f16));
    float16x8_t val = vfmsq_f16(x, vcvtq_f16_s16(m), vdupq_n_f16(0.6931471805f16));

    // Polynomial Approximation
    float16x8_t poly = vtaylor_polyq_f16(val, exp_tab_f16);

    // Reconstruct: add m to the exponent field (saturating)
    poly = (float16x8_t) (vqaddq_s16((int16x8_t) (poly), vqshlq_n_s16(m, 10)));

    // flush underflowed lanes (m < -14) to zero
    poly = vdupq_m(poly, 0.0f, vcmpltq_n_s16(m, -14));
    return poly;
}
236
+
237
+ __STATIC_INLINE float16x8_t arm_vec_exponent_f16(float16x8_t x, int16_t nb)
238
+ {
239
+ float16x8_t r = x;
240
+ nb--;
241
+ while (nb > 0) {
242
+ r = vmulq(r, x);
243
+ nb--;
244
+ }
245
+ return (r);
246
+ }
247
+
248
+ __STATIC_INLINE f16x8_t vpowq_f16(
249
+ f16x8_t val,
250
+ f16x8_t n)
251
+ {
252
+ return vexpq_f16(vmulq_f16(n, vlogq_f16(val)));
253
+ }
254
+
255
/* Magic seed for the Newton reciprocal iteration in vrecip_f16: the
   initial estimate is this constant minus the input's f16 bit pattern. */
#define INV_NEWTON_INIT_F16  0x7773
256
+
257
/**
 * @brief Vector reciprocal with a single high-order refinement step.
 *
 * The seed is built by subtracting the input bit pattern from
 * INV_NEWTON_INIT_F16, then refined with one 8th-order
 * Newton/Householder step:
 *   v *= 8 + w*(-28 + w*(56 + w*(-70 + w*(56 + w*(-28 + w*(8 - w))))))
 * where w = |x| * v (vfmasq(a,b,c) = a*b + c builds the nest below).
 * Zero lanes become +infinity; the input sign is restored at the end.
 * NOTE(review): the seed uses v.f = vecIn (signed) while the iteration
 * uses vecSx = |vecIn|; sign correctness relies on the final vnegq_m --
 * kept exactly as in upstream CMSIS.
 */
__STATIC_INLINE f16x8_t vrecip_f16(f16x8_t vecIn)
{
    f16x8_t     vecSx, vecW, vecTmp;
    any16x8_t   v;

    vecSx = vabsq(vecIn);

    v.f = vecIn;
    /* seed: magic constant minus the input bit pattern */
    v.i = vsubq(vdupq_n_s16(INV_NEWTON_INIT_F16), v.i);

    vecW = vmulq(vecSx, v.f);

    // v.f = v.f * (8 + w * (-28 + w * (56 + w * (-70 + w *(56 + w * (-28 + w * (8 - w)))))));
    vecTmp = vsubq(vdupq_n_f16(8.0f), vecW);
    vecTmp = vfmasq(vecW, vecTmp, -28.0f);
    vecTmp = vfmasq(vecW, vecTmp, 56.0f);
    vecTmp = vfmasq(vecW, vecTmp, -70.0f);
    vecTmp = vfmasq(vecW, vecTmp, 56.0f);
    vecTmp = vfmasq(vecW, vecTmp, -28.0f);
    vecTmp = vfmasq(vecW, vecTmp, 8.0f);
    v.f = vmulq(v.f, vecTmp);

    /* 1/0 -> +infinity */
    v.f = vdupq_m(v.f, F16INFINITY, vcmpeqq(vecIn, 0.0f));
    /*
     * restore sign
     */
    v.f = vnegq_m(v.f, v.f, vcmpltq(vecIn, 0.0f));
    return v.f;
}
286
+
287
+ __STATIC_INLINE f16x8_t vtanhq_f16(
288
+ f16x8_t val)
289
+ {
290
+ f16x8_t x =
291
+ vminnmq_f16(vmaxnmq_f16(val, vdupq_n_f16(-10.f)), vdupq_n_f16(10.0f));
292
+ f16x8_t exp2x = vexpq_f16(vmulq_n_f16(x, 2.f));
293
+ f16x8_t num = vsubq_n_f16(exp2x, 1.f);
294
+ f16x8_t den = vaddq_n_f16(exp2x, 1.f);
295
+ f16x8_t tanh = vmulq_f16(num, vrecip_f16(den));
296
+ return tanh;
297
+ }
298
+
299
+ #endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)*/
300
+
301
+
302
+
303
+ #ifdef __cplusplus
304
+ }
305
+ #endif
306
+
307
+ #endif /* ARM FLOAT16 SUPPORTED */
308
+
309
+ #endif /* _ARM_VEC_MATH_F16_H */
310
+
311
+ /**
312
+ *
313
+ * End of file.
314
+ */
ei-cpp-export/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h ADDED
@@ -0,0 +1,763 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * @file basic_math_functions.h
3
+ * @brief Public header file for CMSIS DSP Library
4
+ * @version V1.9.0
5
+ * @date 20. July 2020
6
+ ******************************************************************************/
7
+ /*
8
+ * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
9
+ *
10
+ * SPDX-License-Identifier: Apache-2.0
11
+ *
12
+ * Licensed under the Apache License, Version 2.0 (the License); you may
13
+ * not use this file except in compliance with the License.
14
+ * You may obtain a copy of the License at
15
+ *
16
+ * www.apache.org/licenses/LICENSE-2.0
17
+ *
18
+ * Unless required by applicable law or agreed to in writing, software
19
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ * See the License for the specific language governing permissions and
22
+ * limitations under the License.
23
+ */
24
+
25
+
26
+ #ifndef _BASIC_MATH_FUNCTIONS_H_
27
+ #define _BASIC_MATH_FUNCTIONS_H_
28
+
29
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h"
30
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h"
31
+
32
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h"
33
+ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h"
34
+
35
+
36
+ #ifdef __cplusplus
37
+ extern "C"
38
+ {
39
+ #endif
40
+
41
+ /**
42
+ * @defgroup groupMath Basic Math Functions
43
+ */
44
+
45
  /**
   * @brief Q7 vector multiplication.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_mult_q7(
  const q7_t * pSrcA,
  const q7_t * pSrcB,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q15 vector multiplication.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_mult_q15(
  const q15_t * pSrcA,
  const q15_t * pSrcB,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q31 vector multiplication.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_mult_q31(
  const q31_t * pSrcA,
  const q31_t * pSrcB,
        q31_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Floating-point vector multiplication.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_mult_f32(
  const float32_t * pSrcA,
  const float32_t * pSrcB,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Floating-point vector addition.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_add_f32(
  const float32_t * pSrcA,
  const float32_t * pSrcB,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q7 vector addition.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_add_q7(
  const q7_t * pSrcA,
  const q7_t * pSrcB,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q15 vector addition.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_add_q15(
  const q15_t * pSrcA,
  const q15_t * pSrcB,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q31 vector addition.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_add_q31(
  const q31_t * pSrcA,
  const q31_t * pSrcB,
        q31_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Floating-point vector subtraction.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_sub_f32(
  const float32_t * pSrcA,
  const float32_t * pSrcB,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q7 vector subtraction.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_sub_q7(
  const q7_t * pSrcA,
  const q7_t * pSrcB,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q15 vector subtraction.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_sub_q15(
  const q15_t * pSrcA,
  const q15_t * pSrcB,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q31 vector subtraction.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_sub_q31(
  const q31_t * pSrcA,
  const q31_t * pSrcB,
        q31_t * pDst,
        uint32_t blockSize);
214
+
215
+
216
  /**
   * @brief Multiplies a floating-point vector by a scalar.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  scale      scale factor to be applied
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_scale_f32(
  const float32_t * pSrc,
        float32_t scale,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Multiplies a Q7 vector by a scalar.
   * @param[in]  pSrc        points to the input vector
   * @param[in]  scaleFract  fractional portion of the scale value
   * @param[in]  shift       number of bits to shift the result by
   * @param[out] pDst        points to the output vector
   * @param[in]  blockSize   number of samples in the vector
   * @return     none
   */
  void arm_scale_q7(
  const q7_t * pSrc,
        q7_t scaleFract,
        int8_t shift,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Multiplies a Q15 vector by a scalar.
   * @param[in]  pSrc        points to the input vector
   * @param[in]  scaleFract  fractional portion of the scale value
   * @param[in]  shift       number of bits to shift the result by
   * @param[out] pDst        points to the output vector
   * @param[in]  blockSize   number of samples in the vector
   * @return     none
   */
  void arm_scale_q15(
  const q15_t * pSrc,
        q15_t scaleFract,
        int8_t shift,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Multiplies a Q31 vector by a scalar.
   * @param[in]  pSrc        points to the input vector
   * @param[in]  scaleFract  fractional portion of the scale value
   * @param[in]  shift       number of bits to shift the result by
   * @param[out] pDst        points to the output vector
   * @param[in]  blockSize   number of samples in the vector
   * @return     none
   */
  void arm_scale_q31(
  const q31_t * pSrc,
        q31_t scaleFract,
        int8_t shift,
        q31_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q7 vector absolute value.
   * @param[in]  pSrc       points to the input buffer
   * @param[out] pDst       points to the output buffer
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_abs_q7(
  const q7_t * pSrc,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Floating-point vector absolute value.
   * @param[in]  pSrc       points to the input buffer
   * @param[out] pDst       points to the output buffer
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_abs_f32(
  const float32_t * pSrc,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q15 vector absolute value.
   * @param[in]  pSrc       points to the input buffer
   * @param[out] pDst       points to the output buffer
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_abs_q15(
  const q15_t * pSrc,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Q31 vector absolute value.
   * @param[in]  pSrc       points to the input buffer
   * @param[out] pDst       points to the output buffer
   * @param[in]  blockSize  number of samples in each vector
   * @return     none
   */
  void arm_abs_q31(
  const q31_t * pSrc,
        q31_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Dot product of floating-point vectors.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[in]  blockSize  number of samples in each vector
   * @param[out] result     output result returned here
   * @return     none
   */
  void arm_dot_prod_f32(
  const float32_t * pSrcA,
  const float32_t * pSrcB,
        uint32_t blockSize,
        float32_t * result);


  /**
   * @brief Dot product of Q7 vectors.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[in]  blockSize  number of samples in each vector
   * @param[out] result     output result returned here
   * @return     none
   */
  void arm_dot_prod_q7(
  const q7_t * pSrcA,
  const q7_t * pSrcB,
        uint32_t blockSize,
        q31_t * result);


  /**
   * @brief Dot product of Q15 vectors.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[in]  blockSize  number of samples in each vector
   * @param[out] result     output result returned here
   * @return     none
   */
  void arm_dot_prod_q15(
  const q15_t * pSrcA,
  const q15_t * pSrcB,
        uint32_t blockSize,
        q63_t * result);


  /**
   * @brief Dot product of Q31 vectors.
   * @param[in]  pSrcA      points to the first input vector
   * @param[in]  pSrcB      points to the second input vector
   * @param[in]  blockSize  number of samples in each vector
   * @param[out] result     output result returned here
   * @return     none
   */
  void arm_dot_prod_q31(
  const q31_t * pSrcA,
  const q31_t * pSrcB,
        uint32_t blockSize,
        q63_t * result);
384
+
385
+
386
  /**
   * @brief Shifts the elements of a Q7 vector a specified number of bits.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  shiftBits  number of bits to shift. A positive value shifts left; a negative value shifts right.
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_shift_q7(
  const q7_t * pSrc,
        int8_t shiftBits,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Shifts the elements of a Q15 vector a specified number of bits.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  shiftBits  number of bits to shift. A positive value shifts left; a negative value shifts right.
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_shift_q15(
  const q15_t * pSrc,
        int8_t shiftBits,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Shifts the elements of a Q31 vector a specified number of bits.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  shiftBits  number of bits to shift. A positive value shifts left; a negative value shifts right.
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_shift_q31(
  const q31_t * pSrc,
        int8_t shiftBits,
        q31_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Adds a constant offset to a floating-point vector.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  offset     is the offset to be added
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_offset_f32(
  const float32_t * pSrc,
        float32_t offset,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Adds a constant offset to a Q7 vector.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  offset     is the offset to be added
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_offset_q7(
  const q7_t * pSrc,
        q7_t offset,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Adds a constant offset to a Q15 vector.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  offset     is the offset to be added
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_offset_q15(
  const q15_t * pSrc,
        q15_t offset,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Adds a constant offset to a Q31 vector.
   * @param[in]  pSrc       points to the input vector
   * @param[in]  offset     is the offset to be added
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_offset_q31(
  const q31_t * pSrc,
        q31_t offset,
        q31_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Negates the elements of a floating-point vector.
   * @param[in]  pSrc       points to the input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_negate_f32(
  const float32_t * pSrc,
        float32_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Negates the elements of a Q7 vector.
   * @param[in]  pSrc       points to the input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_negate_q7(
  const q7_t * pSrc,
        q7_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Negates the elements of a Q15 vector.
   * @param[in]  pSrc       points to the input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_negate_q15(
  const q15_t * pSrc,
        q15_t * pDst,
        uint32_t blockSize);


  /**
   * @brief Negates the elements of a Q31 vector.
   * @param[in]  pSrc       points to the input vector
   * @param[out] pDst       points to the output vector
   * @param[in]  blockSize  number of samples in the vector
   * @return     none
   */
  void arm_negate_q31(
  const q31_t * pSrc,
        q31_t * pDst,
        uint32_t blockSize);
531
+
532
+ /**
533
+ * @brief Compute the logical bitwise AND of two fixed-point vectors.
534
+ * @param[in] pSrcA points to input vector A
535
+ * @param[in] pSrcB points to input vector B
536
+ * @param[out] pDst points to output vector
537
+ * @param[in] blockSize number of samples in each vector
538
+ * @return none
539
+ */
540
+ void arm_and_u16(
541
+ const uint16_t * pSrcA,
542
+ const uint16_t * pSrcB,
543
+ uint16_t * pDst,
544
+ uint32_t blockSize);
545
+
546
+ /**
547
+ * @brief Compute the logical bitwise AND of two fixed-point vectors.
548
+ * @param[in] pSrcA points to input vector A
549
+ * @param[in] pSrcB points to input vector B
550
+ * @param[out] pDst points to output vector
551
+ * @param[in] blockSize number of samples in each vector
552
+ * @return none
553
+ */
554
+ void arm_and_u32(
555
+ const uint32_t * pSrcA,
556
+ const uint32_t * pSrcB,
557
+ uint32_t * pDst,
558
+ uint32_t blockSize);
559
+
560
+ /**
561
+ * @brief Compute the logical bitwise AND of two fixed-point vectors.
562
+ * @param[in] pSrcA points to input vector A
563
+ * @param[in] pSrcB points to input vector B
564
+ * @param[out] pDst points to output vector
565
+ * @param[in] blockSize number of samples in each vector
566
+ * @return none
567
+ */
568
+ void arm_and_u8(
569
+ const uint8_t * pSrcA,
570
+ const uint8_t * pSrcB,
571
+ uint8_t * pDst,
572
+ uint32_t blockSize);
573
+
574
+ /**
575
+ * @brief Compute the logical bitwise OR of two fixed-point vectors.
576
+ * @param[in] pSrcA points to input vector A
577
+ * @param[in] pSrcB points to input vector B
578
+ * @param[out] pDst points to output vector
579
+ * @param[in] blockSize number of samples in each vector
580
+ * @return none
581
+ */
582
+ void arm_or_u16(
583
+ const uint16_t * pSrcA,
584
+ const uint16_t * pSrcB,
585
+ uint16_t * pDst,
586
+ uint32_t blockSize);
587
+
588
+ /**
589
+ * @brief Compute the logical bitwise OR of two fixed-point vectors.
590
+ * @param[in] pSrcA points to input vector A
591
+ * @param[in] pSrcB points to input vector B
592
+ * @param[out] pDst points to output vector
593
+ * @param[in] blockSize number of samples in each vector
594
+ * @return none
595
+ */
596
+ void arm_or_u32(
597
+ const uint32_t * pSrcA,
598
+ const uint32_t * pSrcB,
599
+ uint32_t * pDst,
600
+ uint32_t blockSize);
601
+
602
+ /**
603
+ * @brief Compute the logical bitwise OR of two fixed-point vectors.
604
+ * @param[in] pSrcA points to input vector A
605
+ * @param[in] pSrcB points to input vector B
606
+ * @param[out] pDst points to output vector
607
+ * @param[in] blockSize number of samples in each vector
608
+ * @return none
609
+ */
610
+ void arm_or_u8(
611
+ const uint8_t * pSrcA,
612
+ const uint8_t * pSrcB,
613
+ uint8_t * pDst,
614
+ uint32_t blockSize);
615
+
616
+ /**
617
+ * @brief Compute the logical bitwise NOT of a fixed-point vector.
618
+ * @param[in] pSrc points to input vector
619
+ * @param[out] pDst points to output vector
620
+ * @param[in] blockSize number of samples in each vector
621
+ * @return none
622
+ */
623
+ void arm_not_u16(
624
+ const uint16_t * pSrc,
625
+ uint16_t * pDst,
626
+ uint32_t blockSize);
627
+
628
+ /**
629
+ * @brief Compute the logical bitwise NOT of a fixed-point vector.
630
+ * @param[in] pSrc points to input vector
631
+ * @param[out] pDst points to output vector
632
+ * @param[in] blockSize number of samples in each vector
633
+ * @return none
634
+ */
635
+ void arm_not_u32(
636
+ const uint32_t * pSrc,
637
+ uint32_t * pDst,
638
+ uint32_t blockSize);
639
+
640
+ /**
641
+ * @brief Compute the logical bitwise NOT of a fixed-point vector.
642
+ * @param[in] pSrc points to input vector
643
+ * @param[out] pDst points to output vector
644
+ * @param[in] blockSize number of samples in each vector
645
+ * @return none
646
+ */
647
+ void arm_not_u8(
648
+ const uint8_t * pSrc,
649
+ uint8_t * pDst,
650
+ uint32_t blockSize);
651
+
652
+ /**
653
+ * @brief Compute the logical bitwise XOR of two fixed-point vectors.
654
+ * @param[in] pSrcA points to input vector A
655
+ * @param[in] pSrcB points to input vector B
656
+ * @param[out] pDst points to output vector
657
+ * @param[in] blockSize number of samples in each vector
658
+ * @return none
659
+ */
660
+ void arm_xor_u16(
661
+ const uint16_t * pSrcA,
662
+ const uint16_t * pSrcB,
663
+ uint16_t * pDst,
664
+ uint32_t blockSize);
665
+
666
/**
 * @brief      Compute the logical bitwise XOR of two fixed-point vectors.
 *             pDst[i] = pSrcA[i] ^ pSrcB[i] for each of the blockSize samples.
 * @param[in]  pSrcA      points to input vector A
 * @param[in]  pSrcB      points to input vector B
 * @param[out] pDst       points to output vector
 * @param[in]  blockSize  number of samples in each vector
 * @return     none
 */
void arm_xor_u32(
  const uint32_t * pSrcA,
  const uint32_t * pSrcB,
        uint32_t * pDst,
        uint32_t blockSize);
679
+
680
/**
 * @brief      Compute the logical bitwise XOR of two fixed-point vectors.
 *             pDst[i] = pSrcA[i] ^ pSrcB[i] for each of the blockSize samples.
 * @param[in]  pSrcA      points to input vector A
 * @param[in]  pSrcB      points to input vector B
 * @param[out] pDst       points to output vector
 * @param[in]  blockSize  number of samples in each vector
 * @return     none
 */
void arm_xor_u8(
  const uint8_t * pSrcA,
  const uint8_t * pSrcB,
        uint8_t * pDst,
        uint32_t blockSize);
693
+
694
+ /**
695
+ @brief Elementwise floating-point clipping
696
+ @param[in] pSrc points to input values
697
+ @param[out] pDst points to output clipped values
698
+ @param[in] low lower bound
699
+ @param[in] high higher bound
700
+ @param[in] numSamples number of samples to clip
701
+ @return none
702
+ */
703
+
704
+ void arm_clip_f32(const float32_t * pSrc,
705
+ float32_t * pDst,
706
+ float32_t low,
707
+ float32_t high,
708
+ uint32_t numSamples);
709
+
710
+ /**
711
+ @brief Elementwise fixed-point clipping
712
+ @param[in] pSrc points to input values
713
+ @param[out] pDst points to output clipped values
714
+ @param[in] low lower bound
715
+ @param[in] high higher bound
716
+ @param[in] numSamples number of samples to clip
717
+ @return none
718
+ */
719
+
720
+ void arm_clip_q31(const q31_t * pSrc,
721
+ q31_t * pDst,
722
+ q31_t low,
723
+ q31_t high,
724
+ uint32_t numSamples);
725
+
726
+ /**
727
+ @brief Elementwise fixed-point clipping
728
+ @param[in] pSrc points to input values
729
+ @param[out] pDst points to output clipped values
730
+ @param[in] low lower bound
731
+ @param[in] high higher bound
732
+ @param[in] numSamples number of samples to clip
733
+ @return none
734
+ */
735
+
736
+ void arm_clip_q15(const q15_t * pSrc,
737
+ q15_t * pDst,
738
+ q15_t low,
739
+ q15_t high,
740
+ uint32_t numSamples);
741
+
742
+ /**
743
+ @brief Elementwise fixed-point clipping
744
+ @param[in] pSrc points to input values
745
+ @param[out] pDst points to output clipped values
746
+ @param[in] low lower bound
747
+ @param[in] high higher bound
748
+ @param[in] numSamples number of samples to clip
749
+ @return none
750
+ */
751
+
752
+ void arm_clip_q7(const q7_t * pSrc,
753
+ q7_t * pDst,
754
+ q7_t low,
755
+ q7_t high,
756
+ uint32_t numSamples);
757
+
758
+
759
+ #ifdef __cplusplus
760
+ }
761
+ #endif
762
+
763
+ #endif /* ifndef _BASIC_MATH_FUNCTIONS_H_ */