From b88cf884f39734fe140a2b77e726eb65d0ba90fb Mon Sep 17 00:00:00 2001 From: itsjoeoui Date: Wed, 27 Sep 2023 05:49:53 +0000 Subject: [PATCH] update: fix: requirements.txt to reduce vulnerabilities --- .../AI_ML/LOAD_MODEL/ONNX_MODEL/ONNX_MODEL.md | 57 +++++++++++++++++ .../ONNX_MODEL/a1-[autogen]/docstring.txt | 41 ++++++++++++ .../ONNX_MODEL/a1-[autogen]/python_code.txt | 62 +++++++++++++++++++ .../ONNX_MODEL/appendix/hardware.md | 1 + .../LOAD_MODEL/ONNX_MODEL/appendix/media.md | 1 + .../LOAD_MODEL/ONNX_MODEL/appendix/notes.md | 1 + 6 files changed, 163 insertions(+) create mode 100644 docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/ONNX_MODEL.md create mode 100644 docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/docstring.txt create mode 100644 docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/python_code.txt create mode 100644 docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/hardware.md create mode 100644 docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/media.md create mode 100644 docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/notes.md diff --git a/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/ONNX_MODEL.md b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/ONNX_MODEL.md new file mode 100644 index 0000000000..4ded5d315d --- /dev/null +++ b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/ONNX_MODEL.md @@ -0,0 +1,57 @@ + +[//]: # (Custom component imports) + +import DocString from '@site/src/components/DocString'; +import PythonCode from '@site/src/components/PythonCode'; +import AppDisplay from '@site/src/components/AppDisplay'; +import SectionBreak from '@site/src/components/SectionBreak'; +import AppendixSection from '@site/src/components/AppendixSection'; + +[//]: # (Docstring) + +import DocstringSource from '!!raw-loader!./a1-[autogen]/docstring.txt'; +import PythonSource from '!!raw-loader!./a1-[autogen]/python_code.txt'; + +{DocstringSource} +{PythonSource} + + + + + +[//]: # (Examples) + +## Examples + +import Example1 from './examples/EX1/example.md'; +import App1 from 
'!!raw-loader!./examples/EX1/app.json'; + + + + + {App1} + + + + + + + + +[//]: # (Appendix) + +import Notes from './appendix/notes.md'; +import Hardware from './appendix/hardware.md'; +import Media from './appendix/media.md'; + +## Appendix + + + + + + diff --git a/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/docstring.txt b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/docstring.txt new file mode 100644 index 0000000000..902772f8a6 --- /dev/null +++ b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/docstring.txt @@ -0,0 +1,41 @@ +ONNX_MODEL loads a serialized ONNX model and uses it to make predictions using ONNX Runtime. + + This allows supporting a wide range of deep learning frameworks and hardware platforms. + + Notes + ----- + + On the one hand, ONNX is an open format to represent deep learning models. + ONNX defines a common set of operators - the building blocks of machine learning + and deep learning models - and a common file format to enable AI developers + to use models with a variety of frameworks, tools, runtimes, and compilers. + + See: https://onnx.ai/ + + On the other hand, ONNX Runtime is a high-performance inference engine for machine + learning models in the ONNX format. ONNX Runtime has proved to considerably increase + performance in inferencing for a broad range of ML models and hardware platforms. + + See: https://onnxruntime.ai/docs/ + + Moreover, the ONNX Model Zoo is a collection of pre-trained models for common + machine learning tasks. The models are stored in ONNX format and are ready to use + in different inference scenarios. + + See: https://github.com/onnx/models + + Parameters + ---------- + file_path : str + Path to an ONNX model to load and use for prediction. + + default : Vector + The input tensor to use for prediction. + For now, only a single input tensor is supported. + Note that the input tensor shape is not checked against the model's input shape. 
+ + Returns + ------- + Vector: + The predictions made by the ONNX model. + For now, only a single output tensor is supported. diff --git a/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/python_code.txt b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/python_code.txt new file mode 100644 index 0000000000..012554b48f --- /dev/null +++ b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/a1-[autogen]/python_code.txt @@ -0,0 +1,62 @@ +from flojoy import flojoy, run_in_venv, Vector +from flojoy.utils import FLOJOY_CACHE_DIR + + +@flojoy +@run_in_venv( + pip_dependencies=[ + "onnxruntime", + "numpy", + "onnx", + ] +) +def ONNX_MODEL( + file_path: str, + default: Vector, +) -> Vector: + + + import os + import onnx + import urllib.request + import numpy as np + import onnxruntime as rt + + model_name = os.path.basename(file_path) + + if file_path.startswith("http://") or file_path.startswith("https://"): + # Downloading the ONNX model from a URL to FLOJOY_CACHE_DIR. + onnx_model_zoo_cache = os.path.join( + FLOJOY_CACHE_DIR, "cache", "onnx", "model_zoo" + ) + + os.makedirs(onnx_model_zoo_cache, exist_ok=True) + + filename = os.path.join(onnx_model_zoo_cache, model_name) + + urllib.request.urlretrieve( + url=file_path, + filename=filename, + ) + + # Using the downloaded file. + file_path = filename + + # Pre-loading the serialized model to validate whether it is well-formed or not. + model = onnx.load(file_path) + onnx.checker.check_model(model) + + # Using ONNX runtime for the ONNX model to make predictions. + sess = rt.InferenceSession(file_path, providers=["CPUExecutionProvider"]) + + # TODO(jjerphan): Assuming a single input and a single output for now. + input_name = sess.get_inputs()[0].name + label_name = sess.get_outputs()[0].name + + # TODO(jjerphan): For now NumPy is assumed to be the main backend for Flojoy. + # We might adapt it in the future so that we can use other backends + # for tensor libraries for applications using Deep Learning libraries. 
+ input_tensor = np.asarray(default.v, dtype=np.float32) + predictions = sess.run([label_name], {input_name: input_tensor})[0] + + return Vector(v=predictions) diff --git a/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/hardware.md b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/hardware.md new file mode 100644 index 0000000000..7f78a555c4 --- /dev/null +++ b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/hardware.md @@ -0,0 +1 @@ +This node does not require any peripheral hardware to operate. Please see INSTRUMENTS for nodes that interact with the physical world through connected hardware. \ No newline at end of file diff --git a/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/media.md b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/media.md new file mode 100644 index 0000000000..8bcee9be90 --- /dev/null +++ b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/media.md @@ -0,0 +1 @@ +No supporting screenshots, photos, or videos have been added to the media.md file for this node. \ No newline at end of file diff --git a/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/notes.md b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/notes.md new file mode 100644 index 0000000000..04aded2ec9 --- /dev/null +++ b/docs/nodes/AI_ML/LOAD_MODEL/ONNX_MODEL/appendix/notes.md @@ -0,0 +1 @@ +No theory or technical notes have been contributed for this node yet. \ No newline at end of file