-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathinference.py
53 lines (42 loc) · 1.66 KB
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import sys
import tensorflow as tf
from config import Config
from utils import check_restore_params
from create_model import create_model
from data import generate_batches
from data import load_dataset
from data import query2batch
from seq2seq_model import Seq2Seq_Model
# Module-level configuration object shared by inference(); constructed once at
# import time from the project's config.py (paths, vocab maps, model sizes).
config = Config()
def inference():
    """Interactive inference loop: restore the latest checkpoint and answer stdin queries.

    Builds the model, restores parameters from the newest checkpoint under
    ``config.save_path``, then reads one query per line from stdin, runs the
    seq2seq model in inference mode, and prints the generated response.
    Loops until stdin reaches EOF.

    Raises:
        ValueError: if no restorable checkpoint exists under config.save_path.
    """
    with tf.Session() as sess:
        model = create_model(config)
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(config.save_path)
        # FIX: the original probed the checkpoint *file* prefix with
        # get_checkpoint_state() (which expects a directory), and its else
        # branch dereferenced `ckpt` even when it was None (AttributeError).
        # Use checkpoint_exists() and fail fast with the intended ValueError.
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print("start to load parameters")
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('finish loading parameters')
        else:
            raise ValueError('No model saved at this path:{}'.format(config.save_path))
        sys.stdout.write('Please type your query:')
        sys.stdout.flush()
        query = sys.stdin.readline()
        while query:
            # Skip blank lines instead of feeding an empty query to the model.
            if query.strip():
                data_batch = query2batch(query, config.word2idx)
                infered_ids = model.step(sess, data_batch, forward_only=True, mode='inference')
                # infered_ids[0] presumably holds the token-id sequence for the
                # single query in the batch — TODO confirm against model.step.
                response = ''.join(config.idx2word[idx] + ' ' for idx in infered_ids[0])
                print('The generated response is: {}'.format(response))
            sys.stdout.flush()
            query = sys.stdin.readline()
if __name__ == '__main__':
    # Clear any graph state left over from imported modules so create_model()
    # builds into a fresh default graph, then start the interactive loop.
    tf.reset_default_graph()
    inference()