Skip to content

Commit 5a15093

Browse files
author
Fred
committed
add main files
1 parent f23cb9d commit 5a15093

File tree

9 files changed

+14932
-2
lines changed

9 files changed

+14932
-2
lines changed

README.md

Lines changed: 49 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,52 @@
11
# tfjs node tiny
22
A light-weight, 193MB version of `@tensorflow/tfjs-node` to perform inference on any TensorFlow model in the SavedModel format.
3-
This repository trims all built-in TensorFlow components used for model training, while still allowing for quicker model inference.
3+
This repository strips out the built-in TensorFlow components used for model training, while fully supporting fast model inference.
44

5-
With ≈450 MB reduction in module size, ≈150%-200% the speed to load a model, and slightly faster model inference, this repository outperforms the `@tensorflow/tfjs-node` module in model inference.
5+
With a ≈450 MB smaller module, 1.5–2× faster model loading, and slightly faster model inference, this repository outperforms the `@tensorflow/tfjs-node` module in resource efficiency.
6+
7+
8+
### Code Comparison
9+
`@tensorflow/tfjs-node`:
10+
11+
```js
12+
async function run() {
13+
const { node, tensor} = require('@tensorflow/tfjs-node')
14+
const { bert_multilingual_encode } = require('./tfjs-node-tiny/bert-tokenizer')
15+
const model = await node.loadSavedModel('./bert-small-multilingual');
16+
const input = bert_multilingual_encode(`What's up?`);
17+
while (input.length < 192) input.push(0);
18+
let t = tensor(input, [192], 'int32');
19+
const prediction = model.predict({
20+
input_ids: t
21+
})['output_0'];
22+
console.log(prediction.max().arraySync());
23+
}
24+
run()
25+
```
26+
<br>
27+
28+
`tfjs-node-tiny`:
29+
30+
```js
31+
async function run() {
32+
const { bert_multilingual_encode } = require('./tfjs-node-tiny/bert-tokenizer');
33+
const { loadSavedModel, tensor} = require('./tfjs-node-tiny/node');
34+
const model = await loadSavedModel('./bert-small-multilingual');
35+
const input = bert_multilingual_encode(`What's up?`);
36+
let t = tensor(input, [192], 'int32');
37+
const prediction = model.predict({
38+
input_ids: t
39+
})['output_0'];
40+
console.log(prediction.dataSync()[0])
41+
}
42+
run()
43+
```
44+
45+
### Setup
46+
47+
```bash
48+
node setup.js
49+
```
50+
51+
52+
### Model Release

predict.js

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
async function run() {
2+
const { bert_multilingual_encode } = require('./tfjs-node-tiny/bert-tokenizer');
3+
const {loadSavedModel, tensor} = require('./tfjs-node-tiny/node');
4+
const model = await loadSavedModel('./bert-small-multilingual');
5+
const input = bert_multilingual_encode(`What's up?`);
6+
let t = tensor(input, [192], 'int32');
7+
const prediction = model.predict({
8+
input_ids: t
9+
})['output_0'];
10+
console.log(prediction.dataSync()[0])
11+
}
12+
run()

setup.js

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
const fs = require('fs');
const path = require('path');
const { pipeline } = require('stream');
const { promisify } = require('util');

/**
 * Stream a remote file to disk.
 * Adapted from https://levelup.gitconnected.com/how-to-download-a-file-with-node-js-e2b88fe55409
 *
 * @param {string} url  - URL to fetch (uses the global fetch; Node >= 18).
 * @param {string} dest - Destination file path; parent directories are created.
 * @throws {Error} if the HTTP response status is not ok.
 */
const download = async (url, dest) => {
  const streamPipeline = promisify(pipeline);
  const response = await fetch(url);

  if (!response.ok) {
    throw new Error(`unexpected response ${response.statusText}`);
  }

  // Ensure the target directory exists before opening the write stream,
  // so a fresh checkout doesn't fail with ENOENT.
  fs.mkdirSync(path.dirname(dest), { recursive: true });
  await streamPipeline(response.body, fs.createWriteStream(dest));
};

// Prebuilt TensorFlow binary for the tiny runtime.
// `const` here: the original assignment created an implicit global.
const url = "https://huggingface.co/FredZhang7/bert-multilingual-toxicity-v2/resolve/main/tensorflow.dll";

download(url, "tfjs-node-tiny/tfnapi-v8/tensorflow.dll").catch((err) => {
  console.error(err);
  process.exitCode = 1;
});

0 commit comments

Comments
 (0)