Skip to content

Commit

Permalink
Generate static TfLiteEvalTensors (#2193)
Browse files Browse the repository at this point in the history
This PR generates all of the TfLiteEvalTensors for the graph. It also generates all of the static buffers used for tensors that were present in the flatbuffer.

BUG=b/295077140
  • Loading branch information
rascani authored Aug 29, 2023
1 parent ad83c8c commit 4d07bd0
Show file tree
Hide file tree
Showing 8 changed files with 391 additions and 5 deletions.
12 changes: 12 additions & 0 deletions codegen/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ py_library(
"graph.py",
],
deps = [
":tensor",
":utils",
"//codegen/operators:factory",
"//codegen/operators:operator",
Expand All @@ -35,6 +36,17 @@ py_library(
],
)

# Object representation of model tensors and their flatbuffer-backed
# buffers; consumed by the :graph library during code generation.
py_library(
    name = "tensor",
    srcs = [
        "tensor.py",
    ],
    deps = [
        ":utils",
        "//tensorflow/lite/python:schema_py",
    ],
)

py_library(
name = "utils",
srcs = [
Expand Down
159 changes: 159 additions & 0 deletions codegen/examples/hello_world/hello_world_model.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,74 @@ TFLMInferenceRegistration op_table[OpCode::kCount] = {
tflite::RegisterInference_FULLY_CONNECTED(),
};

// Statically-allocated tensor buffers extracted from the model flatbuffer.
// Buffers noted below as "located in the arena" have no static storage in
// this file: those tensors are backed by the runtime tensor arena, and
// their TfLiteEvalTensor data pointers are initialized to nullptr.
// All arrays are 16-byte aligned (alignas(16)) — presumably the alignment
// the kernels expect for tensor data; TODO confirm against the runtime's
// required buffer alignment.

// buffer_1 is located in the arena

alignas(16) uint8_t buffer_2[4] = {
    0xAD,
    0x01,
    0x00,
    0x00,
};

alignas(16) uint8_t buffer_3[16] = {
    0xD9, 0x3B, 0x27, 0x15, 0x1C, 0xE0, 0xDE, 0xDD,
    0x0F, 0x1B, 0xC5, 0xD7, 0x12, 0xDD, 0xF9, 0x7F,
};

alignas(16) uint8_t buffer_4[64] = {
    0x27, 0xFD, 0xFF, 0xFF, 0xA2, 0x07, 0x00, 0x00, 0x62, 0x02, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0x00, 0x00, 0x00, 0x29, 0xFE,
    0xFF, 0xFF, 0xDD, 0xFF, 0xFF, 0xFF, 0x9D, 0xFC, 0xFF, 0xFF, 0x3B,
    0x02, 0x00, 0x00, 0x45, 0x02, 0x00, 0x00, 0xA4, 0x10, 0x00, 0x00,
    0x67, 0x0F, 0x00, 0x00, 0x4F, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x87, 0xFC, 0xFF, 0xFF, 0x11, 0xEC, 0xFF, 0xFF,
};

alignas(16) uint8_t buffer_5[256] = {
    0xF4, 0x1A, 0xED, 0x09, 0x19, 0x21, 0xF4, 0x24, 0xE0, 0x21, 0xEF, 0xBC,
    0xF7, 0xF5, 0xFA, 0x19, 0x03, 0xDC, 0xD2, 0x02, 0x06, 0xF9, 0xF4, 0x02,
    0xFF, 0xFA, 0xEF, 0xF1, 0xEF, 0xD3, 0x27, 0xE1, 0xFB, 0x27, 0xDD, 0xEB,
    0xDB, 0xE4, 0x05, 0x1A, 0x17, 0xFC, 0x24, 0x12, 0x15, 0xEF, 0x1E, 0xE4,
    0x10, 0xFE, 0x14, 0xDA, 0x1C, 0xF8, 0xF3, 0xF1, 0xEF, 0xE2, 0xF3, 0x09,
    0xE3, 0xE9, 0xED, 0xE3, 0xE4, 0x15, 0x07, 0x0B, 0x04, 0x1B, 0x1A, 0xFE,
    0xEB, 0x01, 0xDE, 0x21, 0xE6, 0x0B, 0xEC, 0x03, 0x23, 0x0A, 0x22, 0x24,
    0x1E, 0x27, 0x03, 0xE6, 0x03, 0x24, 0xFF, 0xC0, 0x11, 0xF8, 0xFC, 0xF1,
    0x11, 0x0C, 0xF5, 0xE0, 0xF3, 0x07, 0x17, 0xE5, 0xE8, 0xED, 0xFA, 0xDC,
    0xE8, 0x23, 0xFB, 0x07, 0xDD, 0xFB, 0xFD, 0x00, 0x14, 0x26, 0x11, 0x17,
    0xE7, 0xF1, 0x11, 0xEA, 0x02, 0x26, 0x04, 0x04, 0x25, 0x21, 0x1D, 0x0A,
    0xDB, 0x1D, 0xDC, 0x20, 0x01, 0xFA, 0xE3, 0x37, 0x0B, 0xF1, 0x1A, 0x16,
    0xEF, 0x1C, 0xE7, 0x03, 0xE0, 0x16, 0x02, 0x03, 0x21, 0x18, 0x09, 0x2E,
    0xD9, 0xE5, 0x14, 0x0B, 0xEA, 0x1A, 0xFC, 0xD8, 0x13, 0x00, 0xC4, 0xD8,
    0xEC, 0xD9, 0xFE, 0x0D, 0x19, 0x20, 0xD8, 0xD6, 0xE2, 0x1F, 0xE9, 0xD7,
    0xCA, 0xE2, 0xDD, 0xC6, 0x13, 0xE7, 0x04, 0x3E, 0x00, 0x01, 0x14, 0xC7,
    0xDB, 0xE7, 0x15, 0x15, 0xF5, 0x06, 0xD6, 0x1A, 0xDC, 0x09, 0x22, 0xFE,
    0x08, 0x02, 0x13, 0xEF, 0x19, 0x1E, 0xE2, 0x09, 0xFD, 0xF3, 0x14, 0xDD,
    0xDA, 0x20, 0xD9, 0x0F, 0xE3, 0xF9, 0xF7, 0xEE, 0xE9, 0x24, 0xE6, 0x29,
    0x00, 0x07, 0x16, 0xE2, 0x1E, 0x0D, 0x23, 0xD3, 0xDD, 0xF7, 0x14, 0xFA,
    0x08, 0x22, 0x26, 0x21, 0x09, 0x08, 0x0F, 0x0B, 0xE0, 0x12, 0xF4, 0x7F,
    0xDC, 0x58, 0xE5, 0x26,
};

alignas(16) uint8_t buffer_6[64] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC2, 0xEA, 0xFF,
    0xFF, 0x75, 0xEA, 0xFF, 0xFF, 0xB8, 0xFA, 0xFF, 0xFF, 0x24, 0xFA,
    0xFF, 0xFF, 0xC8, 0xEF, 0xFF, 0xFF, 0xAC, 0xFF, 0xFF, 0xFF, 0x44,
    0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBD, 0x07, 0x00, 0x00,
    0x33, 0xEA, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xE4, 0xFF,
    0xFF, 0x4F, 0x0D, 0x00, 0x00, 0xCF, 0xE3, 0xFF, 0xFF,
};

alignas(16) uint8_t buffer_7[16] = {
    0xF7, 0xCA, 0x39, 0x47, 0x68, 0x73, 0x62, 0x63,
    0x40, 0xE6, 0x7F, 0x19, 0xAE, 0x44, 0x5F, 0x56,
};

// buffer_8 is located in the arena

// buffer_9 is located in the arena

// buffer_10 is located in the arena

struct Node0_0 {
struct Inputs {
int size = 3;
Expand Down Expand Up @@ -84,6 +152,56 @@ struct Node0_2 {
.asymmetric_quantize_inputs = false};
} node_0_2;

// Per-tensor dimension records. Each struct deliberately uses the field
// order `int size; int data[N];` so that it is layout-compatible with
// TfLiteIntArray; Model() reinterpret_casts pointers to these structs to
// TfLiteIntArray* when initializing the TfLiteEvalTensors. The generator
// must preserve this field order.

struct Tensor0_0Dims {
  int size = 2;
  int data[2] = {1, 1};
} tensor0_0_dims;

struct Tensor0_1Dims {
  int size = 1;
  int data[1] = {1};
} tensor0_1_dims;

struct Tensor0_2Dims {
  int size = 2;
  int data[2] = {1, 16};
} tensor0_2_dims;

struct Tensor0_3Dims {
  int size = 1;
  int data[1] = {16};
} tensor0_3_dims;

struct Tensor0_4Dims {
  int size = 2;
  int data[2] = {16, 16};
} tensor0_4_dims;

struct Tensor0_5Dims {
  int size = 1;
  int data[1] = {16};
} tensor0_5_dims;

struct Tensor0_6Dims {
  int size = 2;
  int data[2] = {16, 1};
} tensor0_6_dims;

struct Tensor0_7Dims {
  int size = 2;
  int data[2] = {1, 16};
} tensor0_7_dims;

struct Tensor0_8Dims {
  int size = 2;
  int data[2] = {1, 16};
} tensor0_8_dims;

struct Tensor0_9Dims {
  int size = 2;
  int data[2] = {1, 1};
} tensor0_9_dims;

} // namespace

Model::Model() {
Expand Down Expand Up @@ -119,6 +237,47 @@ Model::Model() {
.builtin_data = static_cast<void*>(&node_0_2.builtin_data),
.custom_initial_data = nullptr,
.custom_initial_data_size = 0};

subgraph0_tensors_[0] = {
.data = {.data = static_cast<void*>(nullptr /* buffer_1 */)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_0_dims),
.type = kTfLiteInt8};
subgraph0_tensors_[1] = {
.data = {.data = static_cast<void*>(&buffer_2)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_1_dims),
.type = kTfLiteInt32};
subgraph0_tensors_[2] = {
.data = {.data = static_cast<void*>(&buffer_3)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_2_dims),
.type = kTfLiteInt8};
subgraph0_tensors_[3] = {
.data = {.data = static_cast<void*>(&buffer_4)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_3_dims),
.type = kTfLiteInt32};
subgraph0_tensors_[4] = {
.data = {.data = static_cast<void*>(&buffer_5)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_4_dims),
.type = kTfLiteInt8};
subgraph0_tensors_[5] = {
.data = {.data = static_cast<void*>(&buffer_6)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_5_dims),
.type = kTfLiteInt32};
subgraph0_tensors_[6] = {
.data = {.data = static_cast<void*>(&buffer_7)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_6_dims),
.type = kTfLiteInt8};
subgraph0_tensors_[7] = {
.data = {.data = static_cast<void*>(nullptr /* buffer_8 */)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_7_dims),
.type = kTfLiteInt8};
subgraph0_tensors_[8] = {
.data = {.data = static_cast<void*>(nullptr /* buffer_9 */)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_8_dims),
.type = kTfLiteInt8};
subgraph0_tensors_[9] = {
.data = {.data = static_cast<void*>(nullptr /* buffer_10 */)},
.dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_9_dims),
.type = kTfLiteInt8};
}

TfLiteStatus Model::Invoke() { return InvokeSubgraph0(); }
Expand Down
1 change: 1 addition & 0 deletions codegen/examples/hello_world/hello_world_model.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ class Model {

TfLiteContext context_ = {};
TfLiteNode subgraph0_nodes_[3] = {};
TfLiteEvalTensor subgraph0_tensors_[10] = {};
};

} // namespace hello_world_model
69 changes: 65 additions & 4 deletions codegen/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,13 @@
""" Provides object representation for the model that is conducive to code
generation using templates. """

from typing import List, Optional, Sequence
from typing import Dict, List, Optional, Sequence
import string
import textwrap

from tflite_micro.codegen.operators import factory
from tflite_micro.codegen.operators import operator
from tflite_micro.codegen import tensor
from tflite_micro.codegen import utils
from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
from tflite_micro.tensorflow.lite.tools import visualize
Expand Down Expand Up @@ -52,13 +53,17 @@ def full_enum_name(self) -> str:

class Subgraph(object):

def __init__(self, model: schema_fb.ModelT, subgraph_idx: int,
subgraph: schema_fb.SubGraphT):
def __init__(self, model: schema_fb.ModelT, buffers: Sequence[tensor.Buffer],
subgraph_idx: int, subgraph: schema_fb.SubGraphT):
self._subgraph_idx: int = subgraph_idx
self._subgraph: schema_fb.SubGraphT = subgraph
self._op_codes: List[OpCode] = [
OpCode(op_code) for op_code in model.operatorCodes
]
self._tensors: List[Tensor] = []
for t in subgraph.tensors:
self._tensors.append(tensor.Tensor(buffers[t.buffer], t))

self._operators: List[operator.Operator] = []
for op in subgraph.operators:
op_code = model.operatorCodes[op.opcodeIndex]
Expand All @@ -72,6 +77,14 @@ def index(self) -> int:
def operators(self) -> Sequence[operator.Operator]:
return self._operators

  @property
  def tensors(self) -> Sequence[tensor.Tensor]:
    """The subgraph's tensors, in flatbuffer declaration order."""
    return self._tensors

@property
def needs_zero_length_int_array(self) -> bool:
return any(t.needs_zero_length_int_array for t in self.tensors)

@property
def nodes_array(self) -> str:
return f"subgraph{self.index}_nodes_"
Expand Down Expand Up @@ -114,19 +127,67 @@ def generate_c_invoke(self, indent: str) -> str:
node=self.nodes_element(op_idx)))
return textwrap.indent("".join(invoke_strs), indent)

@property
def tensors_array(self) -> str:
return f"subgraph{self.index}_tensors_"

def tensors_element(self, tensor_idx: int) -> str:
return self.tensors_array + f"[{tensor_idx}]"

def tensor_data_type(self, tensor_idx: int) -> str:
return f"Tensor{self.index}_{tensor_idx}"

def tensor_data_name(self, tensor_idx: int) -> str:
return f"tensor{self.index}_{tensor_idx}"

def generate_c_tensor_data(self, indent: str) -> str:
tensor_dims_strs: List[str] = []
for tensor_idx, tensor in enumerate(self.tensors):
type_name = self.tensor_data_type(tensor_idx)
tensor_name = self.tensor_data_name(tensor_idx)
tensor_dims_strs.append(
tensor.generate_c_tensor_dims(type_name, tensor_name))
return textwrap.indent("\n\n".join(tensor_dims_strs), indent)

def generate_c_tensor_init(self, indent: str) -> str:
tensor_init_strs: List[str] = []
for tensor_idx, tensor in enumerate(self.tensors):
tflite_tensor_name = self.tensors_element(tensor_idx)
tensor_data_name = self.tensor_data_name(tensor_idx)
tensor_init_strs.append(
tensor.generate_c_tensor_init(tflite_tensor_name, tensor_data_name))
return textwrap.indent("\n".join(tensor_init_strs), indent)


class Graph(object):
  """Code-generation view of a whole model: its subgraphs plus the
  buffers referenced by their tensors."""

  def __init__(self, model: schema_fb.ModelT):
    # One Buffer wrapper per flatbuffer buffer, named by index so the
    # generated C arrays get stable, unique identifiers.
    buffers: List[tensor.Buffer] = [
        tensor.Buffer("buffer_{}".format(idx), buffer)
        for idx, buffer in enumerate(model.buffers)
    ]
    # Annotation fixed from `SubGraph` (undefined name) to `Subgraph`:
    # annotations on attribute targets are evaluated at runtime (PEP 526),
    # so the misspelling would raise NameError.
    self._subgraphs: List[Subgraph] = [
        Subgraph(model, buffers, idx, subgraph)
        for idx, subgraph in enumerate(model.subgraphs)
    ]

  @property
  def subgraphs(self) -> Sequence[Subgraph]:
    return self._subgraphs

  @property
  def buffers(self) -> Sequence[tensor.Buffer]:
    """Buffers referenced by tensors, in tensor order across subgraphs.

    NOTE(review): this walks tensors, so a buffer shared by two tensors
    would appear twice and be emitted twice by the template — presumably
    buffers are not shared in current models; verify before relying on it.
    """
    buffers: List[tensor.Buffer] = []
    for subgraph in self.subgraphs:
      for t in subgraph.tensors:
        buffers.append(t.buffer)
    return buffers

  @property
  def needs_zero_length_int_array(self) -> bool:
    """True if any subgraph needs the shared zero-length TfLiteIntArray."""
    return any(subgraph.needs_zero_length_int_array
               for subgraph in self.subgraphs)


class OpCodeTable(object):

Expand Down
10 changes: 10 additions & 0 deletions codegen/templates/inference.cc.mako
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,18 @@ TFLMInferenceRegistration op_table[OpCode::kCount] = {
% endfor
};

% for buffer in graph.buffers:
${buffer.generate_c_buffer_array("")}
% endfor
% for subgraph in graph.subgraphs:
${subgraph.generate_c_node_data("")}

${subgraph.generate_c_tensor_data("")}
% endfor

% if graph.needs_zero_length_int_array:
TfLiteIntArray zero_length_int_array = {};
% endif
} // namespace

Model::Model() {
Expand All @@ -57,6 +65,8 @@ Model::Model() {

% for subgraph in graph.subgraphs:
${subgraph.generate_c_node_init(" ")}

${subgraph.generate_c_tensor_init(" ")}
% endfor
}

Expand Down
3 changes: 3 additions & 0 deletions codegen/templates/inference.h.mako
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,9 @@ class Model {
% for subgraph in graph.subgraphs:
TfLiteNode ${subgraph.nodes_array}[${len(subgraph.operators)}] = {};
% endfor
% for subgraph in graph.subgraphs:
TfLiteEvalTensor ${subgraph.tensors_array}[${len(subgraph.tensors)}] = {};
% endfor
};

} // namespace ${model_name}
Loading

0 comments on commit 4d07bd0

Please sign in to comment.