From f69cbbd17a47171b2d4fd938740184ab717d9722 Mon Sep 17 00:00:00 2001
From: Sioni Summers
Date: Wed, 29 Sep 2021 16:29:35 +0200
Subject: [PATCH 1/2] Set appropriate data type for quantized_relu activations

---
 hls4ml/utils/config.py     | 10 +++++++---
 test/pytest/test_qkeras.py | 10 +++++++---
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/hls4ml/utils/config.py b/hls4ml/utils/config.py
index 07863ed8cc..b907350bb4 100644
--- a/hls4ml/utils/config.py
+++ b/hls4ml/utils/config.py
@@ -37,11 +37,14 @@ def _get_precision_from_quantizer(quantizer):
         quantizer['class_name'] = quantizer_obj.__name__
 
     supported_quantizers = ['quantized_bits', 'quantized_relu', 'quantized_tanh', 'quantized_po2', 'quantized_relu_po2']
+    signed = True
     if quantizer['class_name'] in supported_quantizers:
         bits = int(quantizer['config']['bits'])
         # if integer isn't specified, it should be the same as bits
         integer = int(quantizer['config'].get('integer', bits-1)) + 1
-
+        if quantizer['class_name'] == 'quantized_relu':
+            signed = False
+            integer -= 1
     elif quantizer['class_name'] in ['binary', 'stochastic_binary', 'binary_tanh']:
         bits = 2
         integer = 2
@@ -53,10 +56,11 @@ def _get_precision_from_quantizer(quantizer):
         raise Exception('ERROR: Unsupported quantizer: {}'.format(quantizer['class_name']))
 
     decimal = bits - integer
+    signed = '' if signed else 'u'
     if decimal > 0:
-        return 'ap_fixed<{},{}>'.format(bits, integer)
+        return 'ap_{}fixed<{},{}>'.format(signed, bits, integer)
     else:
-        return 'ap_int<{}>'.format(bits)
+        return 'ap_{}int<{}>'.format(signed, bits)
 
 def config_from_keras_model(model, granularity='model', default_precision='ap_fixed<16,6>', default_reuse_factor=1):
     """Create an HLS conversion config given the Keras model.
diff --git a/test/pytest/test_qkeras.py b/test/pytest/test_qkeras.py
index dbf2fa9b38..cdfc312a8c 100644
--- a/test/pytest/test_qkeras.py
+++ b/test/pytest/test_qkeras.py
@@ -186,9 +186,13 @@ def randX_1000_1():
                           (quantized_bits(8,4)),
                           (quantized_bits(4,2)),
                           (quantized_bits(4,0)),
-                          (quantized_bits(10,0)),])
-                          #(quantized_relu(4)),
-                          #(quantized_relu(10))])
+                          (quantized_bits(10,0)),
+                          (quantized_relu(4)),
+                          (quantized_relu(4,2)),
+                          (quantized_relu(8)),
+                          (quantized_relu(8,4)),
+                          (quantized_relu(10)),
+                          (quantized_relu(10,5))])
 def test_quantizer(randX_1000_1, quantizer):
     '''
     Test a single quantizer as an Activation function.
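Note on patch 1: a `quantized_relu` output is non-negative, so all of its bits
carry magnitude and the matching HLS type can be unsigned, recovering the bit
a signed type would spend on the sign. Below is a minimal standalone sketch of
the mapping the hunk implements; the helper name `quantizer_to_precision` is
illustrative only and not part of hls4ml:

    def quantizer_to_precision(class_name, bits, integer=None):
        # Mirrors the patched logic in _get_precision_from_quantizer.
        if integer is None:
            integer = bits - 1       # QKeras default: integer+1 equals bits
        integer += 1                 # one extra integer bit for the sign
        signed = True
        if class_name == 'quantized_relu':
            signed = False           # non-negative output: no sign bit needed
            integer -= 1
        decimal = bits - integer
        u = '' if signed else 'u'
        if decimal > 0:
            return 'ap_{}fixed<{},{}>'.format(u, bits, integer)
        return 'ap_{}int<{}>'.format(u, bits)

    # Expected mappings under the patch:
    assert quantizer_to_precision('quantized_bits', 4, 2) == 'ap_fixed<4,3>'
    assert quantizer_to_precision('quantized_relu', 4, 2) == 'ap_ufixed<4,2>'
    assert quantizer_to_precision('quantized_relu', 10) == 'ap_ufixed<10,9>'

The new test parametrizations exercise exactly these cases: `quantized_relu`
with and without an explicit integer width, at 4, 8 and 10 bits.
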
From f6589df71f55149348c292f6b4fe0a39fdb33b63 Mon Sep 17 00:00:00 2001
From: Sioni Summers
Date: Wed, 29 Sep 2021 16:30:12 +0200
Subject: [PATCH 2/2] Display unsigned types properly in profiling

---
 hls4ml/model/profiling.py | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/hls4ml/model/profiling.py b/hls4ml/model/profiling.py
index 1c8ff0ded9..9aeb38be98 100644
--- a/hls4ml/model/profiling.py
+++ b/hls4ml/model/profiling.py
@@ -159,32 +159,31 @@ def types_histogram(data, fmt='longform'):
 
 types_plots = {'boxplot' : types_boxplot,
                'histogram' : types_histogram}
 
-def ap_fixed_WIF(dtype):
+def ap_fixed_WIFS(dtype):
     from hls4ml.templates.vivado_template import VivadoBackend
     dtype = VivadoBackend.convert_precision_string(None, dtype)
-    W, I, F = dtype.width, dtype.integer, dtype.fractional
-    return W, I, F
+    W, I, F, S = dtype.width, dtype.integer, dtype.fractional, dtype.signed
+    return W, I, F, S
 
 def types_hlsmodel(model):
     suffix = ['w', 'b']
     data = {'layer' : [], 'low' : [], 'high' : []}
     # Plot the default precision
     default_precision = model.config.model_precision['default']
-    # assumes ap_fixed
-    W, I, F = ap_fixed_WIF(default_precision)
+    W, I, F, S = ap_fixed_WIFS(default_precision)
     data['layer'].append('model')
     data['low'].append(-F)
-    data['high'].append(I-1)
+    data['high'].append(I-1 if S else I)
 
     for layer in model.get_layers():
         for iw, weight in enumerate(layer.get_weights()):
             wname = '{}/{}'.format(layer.name, suffix[iw])
             T = weight.type
             if T.name != 'model':
-                W, I, F = ap_fixed_WIF(T.precision)
+                W, I, F, S = ap_fixed_WIFS(T.precision)
                 data['layer'].append(wname)
                 data['low'].append(-F)
-                data['high'].append(I-1)
+                data['high'].append(I-1 if S else I)
     data = pandas.DataFrame(data)
     return data
@@ -192,16 +191,16 @@ def activation_types_hlsmodel(model):
     data = {'layer' : [], 'low' : [], 'high' : []}
     # Get the default precision
     default_precision = model.config.model_precision['default']
-    W, I, F = ap_fixed_WIF(default_precision)
+    W, I, F, S = ap_fixed_WIFS(default_precision)
     data['layer'].append('model')
     data['low'].append(-F)
-    data['high'].append(I-1)
+    data['high'].append(I-1 if S else I)
     for layer in model.get_layers():
         T = layer.get_output_variable().type.precision
-        W, I, F = ap_fixed_WIF(T)
+        W, I, F, S = ap_fixed_WIFS(T)
         data['layer'].append(layer.name)
         data['low'].append(-F)
-        data['high'].append(I-1)
+        data['high'].append(I-1 if S else I)
     data = pandas.DataFrame(data)
     return data
 
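Note on patch 2: the profiling plots draw each type as a span on a log2 axis,
from the smallest representable step (2**-F) up to the largest magnitude bit.
A signed ap_fixed<W,I> spends one of its I integer bits on the sign, so its
top magnitude bit is I-1; an unsigned ap_ufixed<W,I> keeps the full I, which
is what the `I-1 if S else I` expression above encodes. A small sketch of the
bound computation, using the hypothetical helper name `precision_range_log2`
(in the patch the logic is inline in types_hlsmodel and
activation_types_hlsmodel):

    def precision_range_log2(W, I, signed):
        # Log2 bounds of the representable magnitudes of ap_[u]fixed<W,I>.
        F = W - I                        # fractional bits
        low = -F                         # smallest step is 2**-F
        high = I - 1 if signed else I    # sign bit costs one magnitude bit
        return low, high

    # ap_fixed<16,6> reaches ~2**5; ap_ufixed<16,6> reaches ~2**6
    assert precision_range_log2(16, 6, signed=True) == (-10, 5)
    assert precision_range_log2(16, 6, signed=False) == (-10, 6)

Before this patch, an unsigned type produced by patch 1 would have been drawn
one bit narrower than its actual range, since the plots always assumed a
signed ap_fixed.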