common.py
import json
from functools import partial
from typing import Callable

import numpy as np
import tensorflow as tf
import torch
from mlserver.settings import ModelSettings
from optimum.pipelines import pipeline as opt_pipeline
from transformers.pipelines import pipeline as trf_pipeline
from transformers.pipelines.base import Pipeline

from .settings import HuggingFaceSettings

OPTIMUM_ACCELERATOR = "ort"

_PipelineConstructor = Callable[..., Pipeline]

def load_pipeline_from_settings(
    hf_settings: HuggingFaceSettings, settings: ModelSettings
) -> Pipeline:
    # TODO: Support URI for locally downloaded artifacts
    # uri = model_parameters.uri
    pipeline = _get_pipeline_class(hf_settings)

    batch_size = 1
    if settings.max_batch_size:
        batch_size = settings.max_batch_size

    # Fall back to the model's own tokenizer when no tokenizer is configured
    tokenizer = hf_settings.pretrained_tokenizer
    if not tokenizer:
        tokenizer = hf_settings.pretrained_model

    # Apply any configured threading limits to the selected framework
    if hf_settings.framework == "tf":
        if hf_settings.inter_op_threads is not None:
            tf.config.threading.set_inter_op_parallelism_threads(
                hf_settings.inter_op_threads
            )
        if hf_settings.intra_op_threads is not None:
            tf.config.threading.set_intra_op_parallelism_threads(
                hf_settings.intra_op_threads
            )
    elif hf_settings.framework == "pt":
        if hf_settings.inter_op_threads is not None:
            torch.set_num_interop_threads(hf_settings.inter_op_threads)
        if hf_settings.intra_op_threads is not None:
            torch.set_num_threads(hf_settings.intra_op_threads)

    hf_pipeline = pipeline(
        hf_settings.task_name,
        model=hf_settings.pretrained_model,
        tokenizer=tokenizer,
        device=hf_settings.device,
        batch_size=batch_size,
        framework=hf_settings.framework,
    )

    # If max_batch_size > 0 we need to ensure tokens are padded.
    # pad_token_id expects an integer token id, not a list of strings.
    if settings.max_batch_size:
        model = hf_pipeline.model
        hf_pipeline.tokenizer.pad_token_id = model.config.eos_token_id  # type: ignore

    return hf_pipeline
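
# Illustrative usage sketch (not executed; the constructor arguments shown are
# assumptions for demonstration — the real fields are defined in .settings and
# mlserver.settings, and ModelSettings normally requires an implementation):
#
# >>> hf_settings = HuggingFaceSettings(task="text-classification")
# >>> model_settings = ModelSettings(name="my-model", implementation="...")
# >>> clf = load_pipeline_from_settings(hf_settings, model_settings)
# >>> clf("MLServer makes model serving simple")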


def _get_pipeline_class(hf_settings: HuggingFaceSettings) -> _PipelineConstructor:
    # Use the Optimum (ONNX Runtime) pipeline for optimised models,
    # otherwise fall back to the plain Transformers pipeline
    if hf_settings.optimum_model:
        return partial(opt_pipeline, accelerator=OPTIMUM_ACCELERATOR)

    return trf_pipeline


class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serialises numpy arrays as plain lists."""

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)
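
# Example (illustrative): numpy arrays become plain lists during encoding.
#
# >>> json.dumps({"outputs": np.array([0.1, 0.9])}, cls=NumpyEncoder)
# '{"outputs": [0.1, 0.9]}'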