Skip to content

Commit

Permalink
Added example for torchserve grpc v1 and v2. (#307)
Browse files Browse the repository at this point in the history
* Added example for torchserve grpc v1 and v2.

Signed-off-by: Andrews Arokiam <andrews.arokiam@ideas2it.com>

* Schema order changed.

Signed-off-by: Andrews Arokiam <andrews.arokiam@ideas2it.com>

* Corrected v2 REST input.

Signed-off-by: Andrews Arokiam <andrews.arokiam@ideas2it.com>

* Updated grpc-v2 protocolVersion.

Signed-off-by: Andrews Arokiam <andrews.arokiam@ideas2it.com>

* Update README.md

* Update README.md

* Update README.md

---------

Signed-off-by: Andrews Arokiam <andrews.arokiam@ideas2it.com>
Co-authored-by: Dan Sun <dsun20@bloomberg.net>
  • Loading branch information
andyi2it and yuzisun authored Dec 18, 2023
1 parent 097bf28 commit 93f8b93
Show file tree
Hide file tree
Showing 14 changed files with 2,233 additions and 24 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

# python
.venv
*.pyc

# editor
.vscode
Expand Down
272 changes: 250 additions & 22 deletions docs/modelserving/v1beta1/torchserve/README.md

Large diffs are not rendered by default.

37 changes: 37 additions & 0 deletions docs/modelserving/v1beta1/torchserve/inference_pb2.py

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

136 changes: 136 additions & 0 deletions docs/modelserving/v1beta1/torchserve/inference_pb2_grpc.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import inference_pb2 as inference__pb2


class InferenceAPIsServiceStub(object):
    """Client-side stub for TorchServe's InferenceAPIsService.

    Wraps a grpc.Channel with callables for the three RPCs defined in the
    service: Ping, Predictions, and StreamPredictions.  Generated code —
    do not edit by hand; regenerate from the .proto instead.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Health check: google.protobuf.Empty -> TorchServeHealthResponse.
        self.Ping = channel.unary_unary(
                '/org.pytorch.serve.grpc.inference.InferenceAPIsService/Ping',
                request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
                response_deserializer=inference__pb2.TorchServeHealthResponse.FromString,
                )
        # Single request -> single response inference call.
        self.Predictions = channel.unary_unary(
                '/org.pytorch.serve.grpc.inference.InferenceAPIsService/Predictions',
                request_serializer=inference__pb2.PredictionsRequest.SerializeToString,
                response_deserializer=inference__pb2.PredictionResponse.FromString,
                )
        # Single request -> stream of responses (server-streaming RPC).
        self.StreamPredictions = channel.unary_stream(
                '/org.pytorch.serve.grpc.inference.InferenceAPIsService/StreamPredictions',
                request_serializer=inference__pb2.PredictionsRequest.SerializeToString,
                response_deserializer=inference__pb2.PredictionResponse.FromString,
                )


class InferenceAPIsServiceServicer(object):
    """Server-side base class for InferenceAPIsService.

    Subclass and override the RPC methods below; each default
    implementation reports UNIMPLEMENTED to the client and raises.
    Generated code — do not edit by hand.
    """

    def Ping(self, request, context):
        """Check health status of the TorchServe server.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Predictions(self, request, context):
        """Predictions entry point to get inference using default model version.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StreamPredictions(self, request, context):
        """Streaming response for an inference request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_InferenceAPIsServiceServicer_to_server(servicer, server):
    """Register a servicer's RPC handlers with a grpc.Server.

    Args:
        servicer: An InferenceAPIsServiceServicer implementation whose
            Ping/Predictions/StreamPredictions methods handle requests.
        server: The grpc.Server to attach the handlers to.
    """
    # Map each RPC name to a handler that pairs the servicer method with
    # the request deserializer / response serializer for that RPC.
    rpc_method_handlers = {
            'Ping': grpc.unary_unary_rpc_method_handler(
                    servicer.Ping,
                    request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                    response_serializer=inference__pb2.TorchServeHealthResponse.SerializeToString,
            ),
            'Predictions': grpc.unary_unary_rpc_method_handler(
                    servicer.Predictions,
                    request_deserializer=inference__pb2.PredictionsRequest.FromString,
                    response_serializer=inference__pb2.PredictionResponse.SerializeToString,
            ),
            # StreamPredictions is server-streaming, hence unary_stream.
            'StreamPredictions': grpc.unary_stream_rpc_method_handler(
                    servicer.StreamPredictions,
                    request_deserializer=inference__pb2.PredictionsRequest.FromString,
                    response_serializer=inference__pb2.PredictionResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'org.pytorch.serve.grpc.inference.InferenceAPIsService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class InferenceAPIsService(object):
    """Channel-less invocation helpers for InferenceAPIsService.

    Each static method creates a one-off call to the given target via
    grpc.experimental, without requiring the caller to manage a channel
    or stub.  Generated code — do not edit by hand.
    """

    @staticmethod
    def Ping(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Health check: Empty request -> TorchServeHealthResponse.
        return grpc.experimental.unary_unary(request, target, '/org.pytorch.serve.grpc.inference.InferenceAPIsService/Ping',
            google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            inference__pb2.TorchServeHealthResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Predictions(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Unary inference call against the default model version.
        return grpc.experimental.unary_unary(request, target, '/org.pytorch.serve.grpc.inference.InferenceAPIsService/Predictions',
            inference__pb2.PredictionsRequest.SerializeToString,
            inference__pb2.PredictionResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def StreamPredictions(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Server-streaming inference: returns an iterator of responses.
        return grpc.experimental.unary_stream(request, target, '/org.pytorch.serve.grpc.inference.InferenceAPIsService/StreamPredictions',
            inference__pb2.PredictionsRequest.SerializeToString,
            inference__pb2.PredictionResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
40 changes: 40 additions & 0 deletions docs/modelserving/v1beta1/torchserve/management_pb2.py

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 93f8b93

Please sign in to comment.