diff --git a/.gitignore b/.gitignore
index 14d3e6eb5..79aad7ba8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,3 +39,7 @@ examples/.ipynb_checkpoints/
 
 # pyenv
 .python-version
+
+# Data files
+*.h5
+*.dpkl
diff --git a/github_issue_summarization/README.md b/github_issue_summarization/README.md
index 83e4c146d..601a49841 100644
--- a/github_issue_summarization/README.md
+++ b/github_issue_summarization/README.md
@@ -21,7 +21,7 @@ By the end of this tutorial, you should learn how to:
   datasets
 * Train a Sequence-to-Sequence model using TensorFlow on the cluster using
   GPUs
-* Serve the model using a Tornado Server
+* Serve the model using [Seldon Core](https://github.com/SeldonIO/seldon-core/)
 
 ## Steps:
 
diff --git a/github_issue_summarization/notebooks/IssueSummarization.py b/github_issue_summarization/notebooks/IssueSummarization.py
new file mode 100644
index 000000000..4dc9bc041
--- /dev/null
+++ b/github_issue_summarization/notebooks/IssueSummarization.py
@@ -0,0 +1,22 @@
+from __future__ import print_function
+
+import dill as dpickle
+import numpy as np
+from keras.models import load_model
+
+from seq2seq_utils import Seq2Seq_Inference
+
+
+class IssueSummarization(object):
+
+    def __init__(self):
+        with open('body_pp.dpkl', 'rb') as f:
+            body_pp = dpickle.load(f)
+        with open('title_pp.dpkl', 'rb') as f:
+            title_pp = dpickle.load(f)
+        self.model = Seq2Seq_Inference(encoder_preprocessor=body_pp,
+                                       decoder_preprocessor=title_pp,
+                                       seq2seq_model=load_model('seq2seq_model_tutorial.h5'))
+
+    def predict(self, X, feature_names):
+        return np.asarray([[self.model.generate_issue_title(body[0])[1]] for body in X])
diff --git a/github_issue_summarization/notebooks/requirements.txt b/github_issue_summarization/notebooks/requirements.txt
new file mode 100644
index 000000000..27f1c6e89
--- /dev/null
+++ b/github_issue_summarization/notebooks/requirements.txt
@@ -0,0 +1,11 @@
+numpy
+keras
+dill
+matplotlib
+tensorflow
+annoy
+tqdm
+nltk
+IPython
+ktext
+h5py
diff --git a/github_issue_summarization/notebooks/server.py b/github_issue_summarization/notebooks/server.py
deleted file mode 100644
index f007af22d..000000000
--- a/github_issue_summarization/notebooks/server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from __future__ import print_function
-
-import logging
-
-import tornado.web
-from tornado import gen
-from tornado.options import define, options, parse_command_line
-from keras.models import load_model
-import dill as dpickle
-from seq2seq_utils import Seq2Seq_Inference
-
-define("port", default=8888, help="run on the given port", type=int)
-define("instances_key", default='instances', help="requested instances json object key")
-
-
-class PredictHandler(tornado.web.RequestHandler):
-    @gen.coroutine
-    def post(self):
-        request_key = self.settings['request_key']
-        request_data = tornado.escape.json_decode(self.request.body)
-        model = self.settings['model']
-        predictions = [model.generate_issue_title(body)[1] for body in request_data[request_key]]
-        self.write(dict(predictions=predictions))
-
-
-class IndexHandler(tornado.web.RequestHandler):
-    def get(self):
-        self.write('Hello World')
-
-
-def main():
-    parse_command_line()
-    with open('body_pp.dpkl', 'rb') as f:
-        body_pp = dpickle.load(f)
-    with open('title_pp.dpkl', 'rb') as f:
-        title_pp = dpickle.load(f)
-    model = Seq2Seq_Inference(encoder_preprocessor=body_pp,
-                              decoder_preprocessor=title_pp,
-                              seq2seq_model=load_model('seq2seq_model_tutorial.h5'))
-    app = tornado.web.Application(
-        [
-            (r"/predict", PredictHandler),
-            (r"/", IndexHandler),
-        ],
-        xsrf_cookies=False,
-        request_key=options.instances_key,
-        model=model)
-    app.listen(options.port)
-    logging.info('running at http://localhost:%s' % options.port)
-    tornado.ioloop.IOLoop.current().start()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/github_issue_summarization/serving_the_model.md b/github_issue_summarization/serving_the_model.md
index 3b6dc4d7f..e69a1864a 100644
--- a/github_issue_summarization/serving_the_model.md
+++ b/github_issue_summarization/serving_the_model.md
@@ -1,21 +1,64 @@
 # Serving the model
 
-We are going to use a simple tornado server to serve the model. The [server.py](notebooks/server.py) contains the server code.
+We are going to use [seldon-core](https://github.com/SeldonIO/seldon-core) to serve the model. [IssueSummarization.py](notebooks/IssueSummarization.py) contains the code for this model. We will wrap this class into a seldon-core microservice, which we can then deploy as a REST or gRPC API server.
 
-Start the server using `python server.py --port=8888`.
+> The model is written in Keras and, when exported as a TensorFlow model, seems to be incompatible with TensorFlow Serving. So we're using seldon-core to serve this model, since seldon-core allows you to serve any arbitrary model. More details [here](https://github.com/kubeflow/examples/issues/11#issuecomment-371005885).
 
-> The model is written in Keras and when exported as a TensorFlow model seems to be incompatible with TensorFlow Serving. So we're using our own webserver to serve this model. More details [here](https://github.com/kubeflow/examples/issues/11#issuecomment-371005885).
+# Prerequisites
 
-## Sample request
+Ensure that you have the following files from the [training](training_the_model.md) step in your `notebooks` directory:
+
+* `seq2seq_model_tutorial.h5` - the Keras model
+* `body_pp.dpkl` - the serialized body preprocessor
+* `title_pp.dpkl` - the serialized title preprocessor
+
+# Wrap the model into a seldon-core microservice
+
+cd into the `notebooks` directory and run the following docker command. This will create a build/ directory.
+
+```
+cd notebooks/
+docker run -v $(pwd):/my_model seldonio/core-python-wrapper:0.7 /my_model IssueSummarization 0.1 gcr.io --base-image=python:3.6 --image-name=gcr-repository-name/issue-summarization
+```
+
+The build/ directory contains all the necessary files to build the seldon-core microservice image:
+
+```
+cd build/
+./build_image.sh
+```
+
+Now you should see an image named `gcr.io/gcr-repository-name/issue-summarization:0.1` in your docker images. To test the model, you can run it locally using
+
+`docker run -p 5000:5000 gcr.io/gcr-repository-name/issue-summarization:0.1`
+
+You can push the image by running `gcloud docker -- push gcr.io/gcr-repository-name/issue-summarization:0.1`
+
+> You can find more details about wrapping a model with seldon-core [here](https://github.com/SeldonIO/seldon-core/blob/master/docs/wrappers/python.md)
+
+## Sample request and response
+
+Request
 
 ```
-curl -X POST -H 'Content-Type: application/json' -d '{"instances": ["issue overview add a new property to disable detection of image stream files those ended with -is.yml from target directory. expected behaviour by default cube should not process image stream files if user does not set it. current behaviour cube always try to execute -is.yml files which can cause some problems in most of cases, for example if you are using kuberentes instead of openshift or if you use together fabric8 maven plugin with cube"]}' http://localhost:8888/predict
+curl -X POST -d 'json={"data":{"ndarray":[["issue overview add a new property to disable detection of image stream files those ended with -is.yml from target directory. expected behaviour by default cube should not process image stream files if user does not set it. current behaviour cube always try to execute -is.yml files which can cause some problems in most of cases, for example if you are using kuberentes instead of openshift or if you use together fabric8 maven plugin with cube"]]}}' http://localhost:5000/predict
 ```
 
-## Sample response
+Response
 
 ```
-{"predictions": ["add a new property to disable detection"]}
+{
+  "data": {
+    "names": [
+      "t:0"
+    ],
+    "ndarray": [
+      [
+        "add a new property to disable detection"
+      ]
+    ]
+  }
+}
 ```
 
 Next: [Teardown](teardown.md)
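
For reference, the `IssueSummarization` class added above can be smoke-tested directly in Python before building the microservice image — a minimal sketch, assuming the three model artifacts from the prerequisites are in the working directory:

```python
# Minimal smoke test of the wrapped model class (assumes
# seq2seq_model_tutorial.h5, body_pp.dpkl and title_pp.dpkl are
# present in the current directory, per the prerequisites step).
from IssueSummarization import IssueSummarization

model = IssueSummarization()

# predict() takes a 2-D array of issue bodies and returns a 2-D
# array of generated titles, matching the seldon-core contract.
issue_bodies = [["add a new property to disable detection of image stream files"]]
print(model.predict(issue_bodies, feature_names=None))
```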
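The sample request can also be issued from Python. A rough equivalent of the curl command, assuming the third-party `requests` package (any HTTP client works) and the container running locally on port 5000:

```python
import json

import requests

# Same payload shape as the curl example: data.ndarray is a 2-D
# array containing one issue body per inner list.
payload = {"data": {"ndarray": [["add a new property to disable "
                                 "detection of image stream files"]]}}

# The microservice reads the payload from a form field named 'json',
# mirroring the curl command's -d 'json=...' argument.
response = requests.post("http://localhost:5000/predict",
                         data={"json": json.dumps(payload)})

print(response.json()["data"]["ndarray"])
```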