Merge branch 'rolling'
jomjol committed Aug 28, 2022
2 parents aca6046 + c9b7a5f commit 234925c
Showing 258 changed files with 14,826 additions and 2,444 deletions.
10 changes: 10 additions & 0 deletions README.md
@@ -40,6 +40,16 @@ In other cases you can contact the developer via email: <img src="https://raw.gi

------

##### 11.2.0 - Intermediate Digits (2022-08-28)

- Updated TensorFlow / TFLite to the newest TFLite version (as of 2022-07-27)
- Updated analog neural network files (`ana-cont_11.3.0_s2.tflite` - default, `ana-class100_0120_s1_q.tflite`)
- Updated digital neural network files (`dig-cont_0570_s3.tflite` - default, `dig-class100_0120_s2_q.tflite`)

- Added automated filtering of tflite files in the graphical configuration (thanks to @**[caco3](https://github.com/caco3)**)
- Updated consistency algorithm & test cases
- HTML: added favicon and system name, improved reboot dialog (thanks to @**[caco3](https://github.com/caco3)**)

##### 11.1.1 - Intermediate Digits (2022-08-22)

- New and improved consistency check (especially with analog and digital counters mixed)
4 changes: 3 additions & 1 deletion code/components/esp-nn/CMakeLists.txt
@@ -5,7 +5,9 @@ set(c_srcs
"src/basic_math/esp_nn_add_ansi.c"
"src/basic_math/esp_nn_mul_ansi.c"
"src/convolution/esp_nn_conv_ansi.c"
"src/convolution/esp_nn_conv_opt.c"
"src/convolution/esp_nn_depthwise_conv_ansi.c"
"src/convolution/esp_nn_depthwise_conv_opt.c"
"src/fully_connected/esp_nn_fully_connected_ansi.c"
"src/softmax/esp_nn_softmax_ansi.c"
"src/softmax/esp_nn_softmax_opt.c"
@@ -23,7 +25,7 @@ if(CONFIG_IDF_TARGET_ESP32S3)
"src/convolution/esp_nn_conv_esp32s3.c"
"src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c"
"src/convolution/esp_nn_conv_s16_mult8_esp32s3.S"
"src/convolution/esp_nn_conv_s16_mult8_1x1_esp32s3.S"
"src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S"
"src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S"
"src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S"
"src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S"
8 changes: 4 additions & 4 deletions code/components/esp-nn/Kconfig.projbuild
@@ -6,8 +6,8 @@ choice NN_OPTIMIZATIONS
help
Use ANSI-C versions for verification and debug purpose.
Optimisations are automatically picked up for a chipset.
-For ESP32-S3, assembly Optimisations are selected.
-For ESP32, just the ANSI C versions are selected for now.
+For ESP32-S3, assembly optimisations are selected.
+For other platforms (viz., ESP32, ESP32-C3), generic optimisations are used.

config NN_ANSI_C
bool "ANSI C"
@@ -17,8 +17,8 @@ config NN_OPTIMIZED
bool "Optimized versions"
help
Optimisations are automatically picked up for a chipset.
-For ESP32-S3, assembly Optimisations are selected.
-For ESP32, just the ANSI C versions are selected for now.
+For ESP32-S3, assembly optimisations are selected.
+For other platforms (viz., ESP32, ESP32-C3), generic optimisations are used.
endchoice

config NN_OPTIMIZATIONS
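
For reference, a sketch of what this choice produces in the IDF-generated `sdkconfig.h` (macro names follow the config symbols declared above; the file itself is generated by `idf.py menuconfig` and the build system):

```c
/* Sketch only: one of these macros ends up in the generated sdkconfig.h,
 * and esp_nn.h keys off CONFIG_NN_OPTIMIZED to pick the backend. */
#define CONFIG_NN_OPTIMIZED 1      /* "Optimized versions" selected (the default) */
/* #define CONFIG_NN_ANSI_C 1 */   /* "ANSI C" reference versions selected instead */
```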
7 changes: 4 additions & 3 deletions code/components/esp-nn/README.md
@@ -7,7 +7,8 @@ The library contains optimised NN (Neural Network) functions for various Espress

* Supported ESP chipsets include:
* ESP32-S3 (Assembly versions optimised to benefit from vector instructions of ESP32-S3)
-* ESP32 (ANSI C versions)
+* ESP32 (Generic optimisations)
+* ESP32-C3 (Generic optimisations)

## Performance

@@ -39,8 +40,8 @@ The library contains optimised NN (Neural Network) functions for various Espress
* Optimized versions
* ANSI C

-* Default selection is for `Optimized versions`. For ESP32-S3, assembly versions are automatically selected, whereas for ESP32, ANSI-C versions are selected by default.
-* For debugging purposes, you may want to select `ANSI C`
+* Default selection is for `Optimized versions`. For ESP32-S3, assembly versions are automatically selected, whereas for other chipsets (viz., ESP32, ESP32-C3), generic optimisations are selected.
+* For debugging purposes, you may want to select `ANSI C` reference versions.


## Contributing
10 changes: 5 additions & 5 deletions code/components/esp-nn/include/esp_nn.h
@@ -15,6 +15,7 @@
#pragma once

#if defined(CONFIG_NN_OPTIMIZED)
// select apt optimisations
#ifdef CONFIG_IDF_TARGET_ESP32S3
#define ARCH_ESP32_S3 1
#endif
@@ -31,12 +32,11 @@ extern "C" {
#include "esp_nn_ansi_headers.h"

#if defined(CONFIG_NN_OPTIMIZED)
-#ifdef ARCH_ESP32_S3
+#if defined(ARCH_ESP32_S3)
#include "esp_nn_esp32s3.h"
-#endif
-#ifdef ARCH_ESP32
-#include "esp_nn_esp32.h"
-#endif
+#else // for other platforms use generic optimisations
+#include "esp_nn_generic_opt.h"
+#endif // #if defined(ARCH_ESP32_S3)
#else
#include "esp_nn_ansi_c.h"
#endif
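
The practical effect of this selection logic: application code includes only the umbrella header, and the backend is fixed at compile time. A minimal sketch, not part of the commit, assuming an ESP-IDF build with the esp-nn headers on the include path:

```c
#include "sdkconfig.h"   /* IDF-generated Kconfig macros (CONFIG_NN_OPTIMIZED, ...) */
#include "esp_nn.h"

/* Report which kernel set the preprocessor logic above selected. */
#if defined(ARCH_ESP32_S3)
#pragma message("esp-nn backend: ESP32-S3 assembly kernels")
#elif defined(CONFIG_NN_OPTIMIZED)
#pragma message("esp-nn backend: generic optimised kernels")
#else
#pragma message("esp-nn backend: ANSI C reference kernels")
#endif
```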
1 change: 1 addition & 0 deletions code/components/esp-nn/include/esp_nn_ansi_c.h
@@ -19,6 +19,7 @@

#pragma once

#include "esp_nn_defs.h"
#include "esp_nn_ansi_headers.h"

#define esp_nn_add_elementwise_s8 esp_nn_add_elementwise_s8_ansi
138 changes: 82 additions & 56 deletions code/components/esp-nn/include/esp_nn_ansi_headers.h
@@ -18,8 +18,7 @@
* @file Header definitions to include for esp_nn reference functions
*/

-#include <stdint.h>
-
+#include "esp_nn_defs.h"
/************************** Basic math functions ****************************/

/**
@@ -81,28 +80,15 @@ void esp_nn_mul_elementwise_s8_ansi(const int8_t *input1_data,
* optimization notes: Though input_offset is int32 type,
* offset values are contained in 8 bits [-128, 127]
*/
-void esp_nn_depthwise_conv_s8_ansi(const int8_t *input_data,
-const uint16_t input_wd,
-const uint16_t input_ht,
-const uint16_t channels,
-const int32_t input_offset,
-const uint16_t pad_wd,
-const uint16_t pad_ht,
-const uint16_t stride_wd,
-const uint16_t stride_ht,
-const uint16_t ch_mult,
+void esp_nn_depthwise_conv_s8_ansi(const data_dims_t *input_dims,
+const int8_t *input_data,
+const data_dims_t *filter_dims,
const int8_t *filter_data,
-const uint16_t filter_wd,
-const uint16_t filter_ht,
const int32_t *bias,
+const data_dims_t *output_dims,
int8_t *out_data,
-const uint16_t out_wd,
-const uint16_t out_ht,
-const int32_t out_offset,
-const int32_t *out_shift,
-const int32_t *out_mult,
-const int32_t activation_min,
-const int32_t activation_max);
+const dw_conv_params_t *conv_params,
+const quant_data_t *quant_data);

/**
* @brief 2d-convolution channelwise
@@ -112,43 +98,26 @@ void esp_nn_depthwise_conv_s8_ansi(const int8_t *input_data,
* inputs type: int8_t, output: int8_t
* input offsets: although int32_t, they are contained in 8 bits [-128, 127]
*/
-void esp_nn_conv_s8_ansi(const int8_t *input_data,
-const uint16_t input_wd,
-const uint16_t input_ht,
-const uint16_t in_channels,
-const int32_t input_offset,
-const uint16_t pad_wd,
-const uint16_t pad_ht,
-const uint16_t stride_wd,
-const uint16_t stride_ht,
+void esp_nn_conv_s8_ansi(const data_dims_t *input_dims,
+const int8_t *input_data,
+const data_dims_t *filter_dims,
const int8_t *filter_data,
-const uint16_t filter_wd,
-const uint16_t filter_ht,
const int32_t *bias,
+const data_dims_t *output_dims,
int8_t *out_data,
-const uint16_t out_wd,
-const uint16_t out_ht,
-const uint16_t out_channels,
-const int32_t out_offset,
-const int32_t *out_shift,
-const int32_t *out_mult,
-const int32_t activation_min,
-const int32_t activation_max);
-
-int esp_nn_get_conv_scratch_size_ansi(const uint16_t input_wd,
-const uint16_t input_ht,
-const uint16_t in_ch,
-const uint16_t out_ch,
-const uint16_t filter_wd,
-const uint16_t filter_ht);
+const conv_params_t *conv_params,
+const quant_data_t *quant_data);
+
+int esp_nn_get_conv_scratch_size_ansi(const data_dims_t *input_dims,
+const data_dims_t *filter_dims,
+const data_dims_t *output_dims,
+const conv_params_t *conv_params);
void esp_nn_set_conv_scratch_buf_ansi(const void *buf);

-int esp_nn_get_depthwise_conv_scratch_size_ansi(const uint16_t input_wd,
-const uint16_t input_ht,
-const uint16_t channels,
-const uint16_t ch_mult,
-const uint16_t filter_wd,
-const uint16_t filter_ht);
+int esp_nn_get_depthwise_conv_scratch_size_ansi(const data_dims_t *input_dims,
+const data_dims_t *filter_dims,
+const data_dims_t *output_dims,
+const dw_conv_params_t *conv_params);
void esp_nn_set_depthwise_conv_scratch_buf_ansi(const void *buf);
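
A usage sketch of the refactored struct-based API above (not from the commit; the dimensions, quantisation values, and the use of `output_dims.channels` to convey the number of output filters are illustrative assumptions):

```c
#include <stdint.h>
#include <stdlib.h>
#include "esp_nn.h"   /* assumes the esp-nn include directory is on the path */

/* Placeholder buffers for a 1x1 convolution: 8x8x3 input -> 8x8x4 output. */
static int8_t  input[8 * 8 * 3];
static int8_t  filter[1 * 1 * 3 * 4];
static int32_t bias[4];
static int8_t  output[8 * 8 * 4];
static int32_t out_mult[4];   /* per-output-channel requantisation multipliers */
static int32_t out_shift[4];  /* per-output-channel requantisation shifts */

void conv_example(void)
{
    const data_dims_t input_dims  = { .width = 8, .height = 8, .channels = 3, .extra = 0 };
    const data_dims_t filter_dims = { .width = 1, .height = 1, .channels = 3, .extra = 0 };
    const data_dims_t output_dims = { .width = 8, .height = 8, .channels = 4, .extra = 0 };

    const conv_params_t conv_params = {
        .in_offset  = 0,
        .out_offset = 0,
        .stride     = { .width = 1, .height = 1 },
        .padding    = { .width = 0, .height = 0 },
        .dilation   = { .width = 1, .height = 1 },
        .activation = { .min = -128, .max = 127 },
    };
    const quant_data_t quant_data = { .shift = out_shift, .mult = out_mult };

    /* Query and install the scratch buffer before invoking the kernel. */
    int scratch_size = esp_nn_get_conv_scratch_size_ansi(&input_dims, &filter_dims,
                                                         &output_dims, &conv_params);
    void *scratch = NULL;
    if (scratch_size > 0) {
        scratch = malloc((size_t)scratch_size);
        esp_nn_set_conv_scratch_buf_ansi(scratch);
    }

    esp_nn_conv_s8_ansi(&input_dims, input, &filter_dims, filter, bias,
                        &output_dims, output, &conv_params, &quant_data);

    free(scratch);
}
```

Compared with the long scalar parameter lists they replace, the structs keep the signatures stable when new parameters (e.g. dilation) are added.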

/************************** Activation functions *****************************/
@@ -252,9 +221,6 @@ int32_t esp_nn_get_softmax_scratch_size_opt(const int32_t width, const int32_t h
*/
void esp_nn_set_softmax_scratch_buf_ansi(void *buffer);

-/* ANSI C function to be hooked up when optimised version needed */
-void esp_nn_set_softmax_scratch_buf_opt(void *buffer);
-
/**
* @brief reference softmax function
*
Expand All @@ -268,6 +234,66 @@ void esp_nn_softmax_s8_ansi(const int8_t *input_data,
const int32_t diff_min,
int8_t *output_data);


//////////////////////////// Generic optimisations /////////////////////////////

/************************** Convolution functions *****************************/

/**
* @brief 2d-convolution channelwise optimized version
*
* @note operation: result += (input + offset) * filter
*
* inputs type: int8_t, output: int8_t
* input offsets: although int32_t, they are contained in 8 bits [-128, 127]
*/
void esp_nn_conv_s8_opt(const data_dims_t *input_dims,
const int8_t *input_data,
const data_dims_t *filter_dims,
const int8_t *filter_data,
const int32_t *bias,
const data_dims_t *output_dims,
int8_t *out_data,
const conv_params_t *conv_params,
const quant_data_t *quant_data);

/**
* @brief depthwise convolution per channel optimized version
*
* @note inputs type: int8_t, output: int8_t
* Version used in tflite is per channel.
* This version follows the same approach, meaning it has a
* per-out_channel shift and multiplier for requantization.
*
* optimization notes: Though input_offset is int32 type,
* offset values are contained in 8 bits [-128, 127]
*/
void esp_nn_depthwise_conv_s8_opt(const data_dims_t *input_dims,
const int8_t *input_data,
const data_dims_t *filter_dims,
const int8_t *filter_data,
const int32_t *bias,
const data_dims_t *output_dims,
int8_t *out_data,
const dw_conv_params_t *conv_params,
const quant_data_t *quant_data);

int esp_nn_get_conv_scratch_size_opt(const data_dims_t *input_dims,
const data_dims_t *filter_dims,
const data_dims_t *output_dims,
const conv_params_t *conv_params);
void esp_nn_set_conv_scratch_buf_opt(const void *buf);

int esp_nn_get_depthwise_conv_scratch_size_opt(const data_dims_t *input_dims,
const data_dims_t *filter_dims,
const data_dims_t *output_dims,
const dw_conv_params_t *conv_params);
void esp_nn_set_depthwise_conv_scratch_buf_opt(const void *buf);
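
Similarly, a sketch of the depthwise variant, showing `ch_mult` (out_ch = in_ch * ch_mult, per `esp_nn_defs.h`) and the same query/set scratch protocol. All values are placeholders, and the filter channel count is assumed to equal the number of output channels:

```c
#include <stdint.h>
#include <stdlib.h>
#include "esp_nn.h"

/* Placeholder buffers: 3x3 depthwise over a 16x16x8 input, ch_mult = 2 -> 16x16x16 output. */
static int8_t  dw_input[16 * 16 * 8];
static int8_t  dw_filter[3 * 3 * 16];
static int32_t dw_bias[16];
static int8_t  dw_output[16 * 16 * 16];
static int32_t dw_mult[16], dw_shift[16];

void depthwise_example(void)
{
    const data_dims_t in_dims  = { .width = 16, .height = 16, .channels = 8 };
    const data_dims_t flt_dims = { .width = 3,  .height = 3,  .channels = 16 };
    const data_dims_t out_dims = { .width = 16, .height = 16, .channels = 16 };

    const dw_conv_params_t params = {
        .in_offset = 0, .out_offset = 0, .ch_mult = 2,
        .stride   = { .width = 1, .height = 1 },
        .padding  = { .width = 1, .height = 1 },   /* "same" padding for 3x3 */
        .dilation = { .width = 1, .height = 1 },
        .activation = { .min = -128, .max = 127 },
    };
    const quant_data_t quant = { .shift = dw_shift, .mult = dw_mult };

    int scratch_size = esp_nn_get_depthwise_conv_scratch_size_opt(&in_dims, &flt_dims,
                                                                  &out_dims, &params);
    void *scratch = (scratch_size > 0) ? malloc((size_t)scratch_size) : NULL;
    esp_nn_set_depthwise_conv_scratch_buf_opt(scratch);

    esp_nn_depthwise_conv_s8_opt(&in_dims, dw_input, &flt_dims, dw_filter, dw_bias,
                                 &out_dims, dw_output, &params, &quant);

    free(scratch);
}
```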

/* ANSI C function to be hooked up when optimised version needed */
void esp_nn_set_softmax_scratch_buf_opt(void *buffer);

/**
* @brief optimised version of softmax function
*
83 changes: 83 additions & 0 deletions code/components/esp-nn/include/esp_nn_defs.h
@@ -0,0 +1,83 @@
// Copyright 2022 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <stdint.h>

/**
* @brief structure to club data dims
* this structure can be used for input, output and filter
*/
typedef struct data_dims {
int32_t width;
int32_t height;
int32_t channels;

int32_t extra; // can be used as batch or any other param
} data_dims_t;

/**
* @brief 2d data structure (width, height)
*
*/
typedef struct data_2d {
int32_t width;
int32_t height;
} data_2d_t;

/**
* @brief min/max activation
*/
typedef struct act_params {
int32_t min;
int32_t max;
} act_params_t;

/**
* @brief per channel quant data
*
* @note the number of shift and mult elements equals the number of output channels
*/
typedef struct quant_data {
int32_t *shift;
int32_t *mult;
} quant_data_t;

/**
* @brief params specific to convolution 2d
*
*/
typedef struct conv_params {
int32_t in_offset;
int32_t out_offset;
data_2d_t stride;
data_2d_t padding;
data_2d_t dilation;
act_params_t activation;
} conv_params_t;

/**
* @brief params specific to depthwise convolution 2d
*
*/
typedef struct dw_conv_params {
int32_t in_offset;
int32_t out_offset;
int32_t ch_mult; // channel multiplier. (in_ch * ch_mult = out_ch)
data_2d_t stride;
data_2d_t padding;
data_2d_t dilation;
act_params_t activation;
} dw_conv_params_t;
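
The comments above pin down the only non-obvious relationship (`out_ch = in_ch * ch_mult`); the spatial output size follows the usual convolution arithmetic. A hypothetical helper, not part of esp-nn, illustrating how the structs compose:

```c
#include "esp_nn_defs.h"

/* Derive depthwise-conv output dims from the structs above, using
 * out = (in + 2*pad - dilation*(filter - 1) - 1) / stride + 1. */
static data_dims_t dw_conv_output_dims(const data_dims_t *in,
                                       const data_dims_t *filter,
                                       const dw_conv_params_t *p)
{
    data_dims_t out;
    out.width    = (in->width  + 2 * p->padding.width
                    - p->dilation.width  * (filter->width  - 1) - 1) / p->stride.width  + 1;
    out.height   = (in->height + 2 * p->padding.height
                    - p->dilation.height * (filter->height - 1) - 1) / p->stride.height + 1;
    out.channels = in->channels * p->ch_mult;   /* per the ch_mult comment above */
    out.extra    = in->extra;
    return out;
}
```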
