Add update for Tau NN with pT calibration #43639

Merged · 2 commits · Jan 22, 2024
30 changes: 11 additions & 19 deletions L1Trigger/Phase2L1ParticleFlow/interface/taus/TauNNIdHW.h
@@ -1,32 +1,24 @@
#ifndef L1Trigger_Phase2L1ParticleFlow_TAUNNIDHW_H_
#define L1Trigger_Phase2L1ParticleFlow_TAUNNIDHW_H_

#include "DataFormats/L1TParticleFlow/interface/layer1_emulator.h"

#include <cstdio>
#include <complex>
#include "ap_int.h"
#include "ap_fixed.h"

#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/tau_parameters.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h"

#include "DataFormats/L1TParticleFlow/interface/layer1_emulator.h"
#include "DataFormats/L1TParticleFlow/interface/PFCandidate.h"

#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_layer.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_activation.h"

//hls-fpga-machine-learning insert weights
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w1.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b1.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w3.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b3.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w4.h"
#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b4.h"

typedef ap_ufixed<16, 14> pt_t;
typedef ap_fixed<10, 4> etaphi_t;

// Tau NN returns two values
struct Tau_NN_Result {
result_t nn_pt_correction;
result_t nn_id;
};

namespace L1TauEmu {
// Data types and constants used in the FPGA and FPGA-optimized functions
//etaphi_base maps physical eta phi units onto bits
@@ -148,8 +140,8 @@ class TauNNIdHW {
void initialize(const std::string &iName, int iNParticles);
void SetNNVectorVar();
input_t *NNVectorVar() { return NNvectorVar_.data(); }
-result_t EvaluateNN();
-result_t compute(const l1t::PFCandidate &iSeed, std::vector<l1t::PFCandidate> &iParts);
+Tau_NN_Result EvaluateNN();
+Tau_NN_Result compute(const l1t::PFCandidate &iSeed, std::vector<l1t::PFCandidate> &iParts);
//void print();

std::string fInput_;
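The interface change above is the core of the PR: compute() now returns a Tau_NN_Result carrying both the identification score and a pT correction instead of a single result_t. A minimal standalone sketch of how a caller might consume the two outputs; the seed pT value and the multiplicative interpretation of nn_pt_correction are illustrative assumptions, not taken from the diff:

#include "ap_fixed.h"
#include <cstdio>

typedef ap_fixed<16, 6> result_t;  // same precision as result_t in defines.h

// Mirrors Tau_NN_Result from TauNNIdHW.h above
struct Tau_NN_Result {
  result_t nn_pt_correction;
  result_t nn_id;
};

int main() {
  Tau_NN_Result r;
  r.nn_pt_correction = 1.125;  // illustrative NN outputs
  r.nn_id = 0.9;
  float seedPt = 40.0f;  // hypothetical seed pT in GeV
  // Assumed multiplicative convention: calibrated pT = correction * seed pT
  printf("id=%.3f  calibrated pT=%.2f\n", r.nn_id.to_float(), r.nn_pt_correction.to_float() * seedPt);
  return 0;
}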
70 changes: 70 additions & 0 deletions L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h
@@ -0,0 +1,70 @@
#ifndef DEFINES_H_
#define DEFINES_H_

#include "ap_fixed.h"
#include "ap_int.h"

#include <cstddef>
#include <cstdio>

// hls-fpga-machine-learning insert numbers
#define N_INPUT_1_1 80
#define N_LAYER_2 25
#define N_LAYER_2 25
#define N_LAYER_5 25
#define N_LAYER_5 25
#define N_LAYER_8 15
#define N_LAYER_8 15
#define N_LAYER_11 15
#define N_LAYER_11 15
#define N_LAYER_14 10
#define N_LAYER_14 10
#define N_LAYER_17 1
#define N_LAYER_17 1
#define N_LAYER_20 1

// hls-fpga-machine-learning insert layer-precision
typedef ap_fixed<16, 6> input_t;
typedef ap_fixed<24, 12> input2_t;
typedef ap_fixed<16, 6> model_default_t;
typedef ap_fixed<16, 6> layer2_t;
typedef ap_fixed<9, 3> weight2_t;
typedef ap_fixed<9, 3> bias2_t;
typedef ap_uint<1> layer2_index;
typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer4_t;
typedef ap_fixed<18, 8> relu_1_table_t;
typedef ap_fixed<16, 6> layer5_t;
typedef ap_fixed<9, 3> weight5_t;
typedef ap_fixed<9, 3> bias5_t;
typedef ap_uint<1> layer5_index;
typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer7_t;
typedef ap_fixed<18, 8> relu_2_table_t;
typedef ap_fixed<16, 6> layer8_t;
typedef ap_fixed<9, 3> weight8_t;
typedef ap_fixed<9, 3> bias8_t;
typedef ap_uint<1> layer8_index;
typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer10_t;
typedef ap_fixed<18, 8> relu_3_table_t;
typedef ap_fixed<16, 6> layer11_t;
typedef ap_fixed<9, 3> weight11_t;
typedef ap_fixed<9, 3> bias11_t;
typedef ap_uint<1> layer11_index;
typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer13_t;
typedef ap_fixed<18, 8> relu_4_table_t;
typedef ap_fixed<16, 6> layer14_t;
typedef ap_fixed<9, 3> weight14_t;
typedef ap_fixed<9, 3> bias14_t;
typedef ap_uint<1> layer14_index;
typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer16_t;
typedef ap_fixed<18, 8> relu_5_table_t;
typedef ap_fixed<16, 6> layer17_t;
typedef ap_fixed<16, 7> weight17_t;
typedef ap_fixed<16, 7> bias17_t;
typedef ap_uint<1> layer17_index;
typedef ap_fixed<16, 6> result_t;
typedef ap_fixed<18, 8> jetID_output_table_t;
typedef ap_fixed<16, 7> weight20_t;
typedef ap_fixed<16, 7> bias20_t;
typedef ap_uint<1> layer20_index;

#endif
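The layer sizes spell out the network topology: 80 inputs, hidden layers of 25, 25, 15, 15, and 10 units, then two single-unit outputs (N_LAYER_17 and N_LAYER_20), matching the two fields of Tau_NN_Result. The ap_fixed<W, I> types give W total bits with I of them integer bits. A small standalone sketch of what these precisions mean, assuming only that the HLS ap_fixed headers are on the include path:

#include "ap_fixed.h"
#include <cstdio>

int main() {
  // ap_fixed<16, 6>: 16 bits total, 6 integer bits -> range [-32, 32), LSB = 2^-10
  ap_fixed<16, 6> x = 3.14159265;
  // ap_ufixed<9, 0, AP_RND_CONV, AP_SAT>: unsigned, range [0, 1) in 2^-9 steps,
  // convergent rounding and saturation -- the type used for the ReLU outputs above
  ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> y = 1.7;  // saturates just below 1.0
  printf("x = %.6f  y = %.6f\n", x.to_double(), y.to_double());
  return 0;
}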
94 changes: 94 additions & 0 deletions L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_activation.h
@@ -0,0 +1,94 @@
#ifndef NNET_ACTIVATION_H_
#define NNET_ACTIVATION_H_

#include <cmath>
#include "ap_fixed.h"
#include "nnet_common.h"

namespace nnet {

struct activ_config {
// IO size
static const unsigned n_in = 10;

// Internal info
static const unsigned table_size = 1024;

// Resource reuse info
static const unsigned io_type = io_parallel;
static const unsigned reuse_factor = 1;

// Internal data type definitions
typedef ap_fixed<18, 8> table_t;
};

// *************************************************
// LINEAR Activation -- See Issue 53
// *************************************************
template <class data_T, class res_T, typename CONFIG_T>
void linear(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) {
res[ii] = data[ii];
}
}

// *************************************************
// RELU Activation
// *************************************************
template <class data_T, class res_T, typename CONFIG_T>
void relu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
data_T datareg;
for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) {
datareg = data[ii];
if (datareg > 0)
res[ii] = datareg;
else
res[ii] = 0;
}
}

// *************************************************
// Sigmoid Activation
// *************************************************
template <class out_T>
inline out_T sigmoid_fcn_float(float input) {
return 1.0 / (1 + exp(-input));
}

template <class res_T, typename CONFIG_T, int N_TABLE>
void init_sigmoid_table(res_T table_out[N_TABLE]) {
// Default logistic sigmoid function:
// result = 1/(1+e^(-x))
for (unsigned ii = 0; ii < N_TABLE; ii++) {
// First, convert from table index to X-value (signed 8-bit, range -8 to +8)
float in_val = 2 * 8.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE);
// Next, compute lookup table function
res_T real_val = sigmoid_fcn_float<res_T>(in_val);
//std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl;
table_out[ii] = (res_T)real_val;
}
}

template <class data_T, class res_T, typename CONFIG_T>
void sigmoid(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
// Initialize the lookup table
res_T sigmoid_table[CONFIG_T::table_size];
init_sigmoid_table<res_T, CONFIG_T, CONFIG_T::table_size>(sigmoid_table);

// Index into the lookup table based on data
int data_round;
int index;
for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) {
data_round = data[ii] * CONFIG_T::table_size / 16;
index = data_round + 8 * CONFIG_T::table_size / 16;
// Use a signed index so inputs below -8 clamp to 0 instead of wrapping around
if (index < 0)
index = 0;
if (index > (int)(CONFIG_T::table_size - 1))
index = CONFIG_T::table_size - 1;
res[ii] = (res_T)sigmoid_table[index];
}
}

} // namespace nnet

#endif
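The sigmoid is evaluated from a lookup table spanning inputs in [-8, 8): the input is scaled by table_size/16 and offset by half the table. A plain-float sketch of the same index arithmetic, useful for cross-checking the LUT against the exact logistic function (standalone, no HLS headers needed):

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const int table_size = 1024;  // matches activ_config::table_size
  for (float x : {-10.0f, -4.0f, 0.0f, 4.0f, 10.0f}) {
    // Same mapping as sigmoid() above: [-8, 8) -> [0, table_size)
    int index = (int)(x * table_size / 16) + 8 * table_size / 16;
    index = std::min(std::max(index, 0), table_size - 1);
    // Value stored at this index by init_sigmoid_table()
    float in_val = 2 * 8.0f * (index - table_size / 2.0f) / table_size;
    float lut = 1.0f / (1.0f + std::exp(-in_val));
    float exact = 1.0f / (1.0f + std::exp(-x));
    printf("x=%6.2f  index=%4d  lut=%.4f  exact=%.4f\n", x, index, lut, exact);
  }
  return 0;
}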
52 changes: 52 additions & 0 deletions L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_common.h
@@ -0,0 +1,52 @@
#ifndef NNET_COMMON_H_
#define NNET_COMMON_H_

#include "ap_fixed.h"

// This is a substitute for "ceil(n/(float)d)".
#define DIV_ROUNDUP(n, d) ((n + d - 1) / d)
#define MIN(n, d) (n > d ? d : n)
#define MAX(n, d) (n > d ? n : d)

#define STRINGIFY(x) #x
#define EXPAND_STRING(x) STRINGIFY(x)

namespace nnet {

// Common type definitions
enum io_type { io_parallel = 0, io_stream };
enum strategy { latency, resource };

template <class T>
class Op_add {
public:
T operator()(T a, T b) { return a + b; }
};

template <class T>
class Op_and {
public:
T operator()(T a, T b) { return a && b; }
};

template <class T>
class Op_or {
public:
T operator()(T a, T b) { return a || b; }
};

template <class T>
class Op_max {
public:
T operator()(T a, T b) { return a >= b ? a : b; }
};

template <class T>
class Op_min {
public:
T operator()(T a, T b) { return a <= b ? a : b; }
};

} // namespace nnet

#endif
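These helpers are generic building blocks: DIV_ROUNDUP is integer ceiling division (e.g., for splitting work across reuse factors), and the Op_* functors are the binary operations a reduction can be instantiated with. A small standalone illustration that replicates the header's definitions; the fold loop is illustrative, not the emulator's own reduction utility:

#include <cstdio>

#define DIV_ROUNDUP(n, d) ((n + d - 1) / d)

template <class T>
class Op_max {
public:
  T operator()(T a, T b) { return a >= b ? a : b; }
};

int main() {
  // 80 inputs split into chunks of 25 -> 4 chunks (ceiling division)
  printf("DIV_ROUNDUP(80, 25) = %d\n", DIV_ROUNDUP(80, 25));

  // Folding an array with one of the Op_* functors
  int v[5] = {3, -1, 7, 2, 5};
  Op_max<int> op;
  int m = v[0];
  for (int i = 1; i < 5; i++)
    m = op(m, v[i]);
  printf("max = %d\n", m);  // prints 7
  return 0;
}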
72 changes: 72 additions & 0 deletions L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_dense.h
@@ -0,0 +1,72 @@
#ifndef NNET_DENSE_H_
#define NNET_DENSE_H_

#include "nnet_common.h"
#include "nnet_mult.h"
#include <math.h>

namespace nnet {

struct dense_config {
// Internal data type definitions
typedef float bias_t;
typedef float weight_t;
typedef float accum_t;

// Layer Sizes
static const unsigned n_in = 10;
static const unsigned n_out = 10;

// Resource reuse info
static const unsigned io_type = io_parallel;
static const unsigned strategy = latency;
static const unsigned reuse_factor = 1;
static const bool store_weights_in_bram = false;
static const unsigned n_zeros = 0;
// partitioning arrays cyclically to go with roll factors?
// Product function to use
template <class x_T, class y_T>
using product = nnet::product::mult<x_T, y_T>;
};

template <class data_T, class res_T, typename CONFIG_T>
void dense(data_T data[CONFIG_T::n_in],
res_T res[CONFIG_T::n_out],
typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out],
typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) {
data_T cache;
typename CONFIG_T::accum_t mult[CONFIG_T::n_in * CONFIG_T::n_out];
typename CONFIG_T::accum_t acc[CONFIG_T::n_out];

// Do the matrix-multiply
for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) {
cache = data[ii];
for (unsigned jj = 0; jj < CONFIG_T::n_out; jj++) {
unsigned index = ii * CONFIG_T::n_out + jj;
mult[index] = CONFIG_T::template product<data_T, typename CONFIG_T::weight_t>::product(cache, weights[index]);
}
}

// Initialize accumulator with input biases
for (unsigned iacc = 0; iacc < CONFIG_T::n_out; iacc++) {
acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc];
}

// Accumulate multiplication result
for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) {
for (unsigned jj = 0; jj < CONFIG_T::n_out; jj++) {
unsigned index = ii * CONFIG_T::n_out + jj;
acc[jj] += mult[index];
}
}

// Cast to "res_t" type
for (unsigned ires = 0; ires < CONFIG_T::n_out; ires++) {
// res[ires] = (res_T) (acc[ires]);
res[ires] = cast<data_T, res_T, CONFIG_T>(acc[ires]);
}
}

} // namespace nnet

#endif
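The dense() kernel above computes res = W·data + b with weights stored row-major by input (index = ii * n_out + jj), which is why the accumulate loop sweeps inputs in the outer loop. A plain-float reference of the same arithmetic, handy for cross-checking the fixed-point emulator (standalone sketch, not part of the PR):

#include <cstdio>
#include <vector>

// out[j] = b[j] + sum_i in[i] * W[i * n_out + j] -- same layout as dense() above
std::vector<float> dense_ref(const std::vector<float> &in,
                             const std::vector<float> &W,
                             const std::vector<float> &b) {
  const size_t n_in = in.size(), n_out = b.size();
  std::vector<float> out(b);  // start from the biases
  for (size_t i = 0; i < n_in; i++)
    for (size_t j = 0; j < n_out; j++)
      out[j] += in[i] * W[i * n_out + j];
  return out;
}

int main() {
  // 2 inputs -> 3 outputs, W laid out row-major by input
  std::vector<float> in = {1.0f, 2.0f};
  std::vector<float> W = {0.1f, 0.2f, 0.3f,   // weights for in[0]
                          0.4f, 0.5f, 0.6f};  // weights for in[1]
  std::vector<float> b = {0.0f, 1.0f, -1.0f};
  std::vector<float> out = dense_ref(in, W, b);
  printf("%.2f %.2f %.2f\n", out[0], out[1], out[2]);  // 0.90 2.20 0.50
  return 0;
}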