-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun_npu_inference.py
61 lines (48 loc) · 1.44 KB
/
run_npu_inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
#
# Audio classification inference accelerated with vx_delegate
#
import numpy as np
import tflite_runtime.interpreter as tflite
import librosa

# Path to the VeriSilicon VX delegate shared library (NPU acceleration).
DELEGATE_PATH = "./libvx_delegate.so"
# Path to the TFLite audio-classification model.
MODEL_PATH = "./audio_classifier.tflite"
# Path to the input audio used for this example.
AUDIO_PATH = "./sample.wav"

# Map the model's integer class index to a human-readable label.
TAGS = {
    0: 'none',
    1: 'hello',
    2: 'khadas',
    3: 'vim',
    4: 'edge',
    5: 'tone',
    6: 'mind',
}


def preprocess(audio_path):
    """Load an audio file and convert it to a log-mel spectrogram.

    Returns an array of shape (n_mels, frames, 1); the trailing channel
    axis matches the model's expected input layout. librosa resamples to
    its default 22050 Hz — the mel parameters here must match the ones
    used when the model was trained (TODO confirm against training code).
    """
    scale, sr = librosa.load(audio_path)
    mel = librosa.feature.melspectrogram(
        y=scale, sr=sr, n_fft=4096, hop_length=512, n_mels=256, fmax=8000
    )
    log_mel = librosa.power_to_db(mel)
    return np.expand_dims(log_mel, axis=-1)


def classify(features):
    """Run the TFLite model on `features` via the VX NPU delegate.

    Returns the predicted integer class index (argmax of the first
    output tensor's first batch element).
    """
    vx_delegate = tflite.load_delegate(
        library=DELEGATE_PATH,
        options={"logging-severity": "debug"},
    )
    interpreter = tflite.Interpreter(
        model_path=MODEL_PATH,
        experimental_delegates=[vx_delegate],
    )
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Add the batch dimension explicitly and cast to the dtype the model
    # declares: set_tensor rejects any dtype mismatch, so relying on an
    # implicit list-to-array conversion (as the original did) breaks when
    # the preprocessing dtype differs from the model's input type.
    batch = np.expand_dims(features, axis=0).astype(input_details[0]["dtype"])
    interpreter.set_tensor(input_details[0]["index"], batch)
    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]["index"])
    return int(np.argmax(output_data[0]))


def main():
    """Preprocess the sample audio, classify it, and print the label."""
    features = preprocess(AUDIO_PATH)
    prediction = classify(features)
    print(prediction, TAGS[prediction])


if __name__ == "__main__":
    main()