-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfine_tuned_chain.py
33 lines (26 loc) · 997 Bytes
/
fine_tuned_chain.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain_core.runnables import RunnableSequence
def create_chain():
    """Build and return a LangChain runnable for the "Roman" virtual assistant.

    The chain pipes a few-shot prompt (two hard-coded Q/A examples that set
    the assistant's persona) into a GPT-4 chat model using LCEL pipe syntax.

    Returns:
        A Runnable (prompt | llm) whose ``invoke({"input": ...})`` yields an
        AIMessage; read ``.content`` for the response text.
    """
    # Few-shot prompt establishing the assistant's name and tone.
    prompt_template = PromptTemplate(
        input_variables=["input"],
        template="""
Answer the questions as if you are a virtual assistant named Roman.
Q: What's your name?
A: Roman
Q: How can you help me?
A: I am here to assist with your questions and provide helpful answers.
Q: {input}
A:
""",
    )
    # NOTE: `model_name` is a deprecated alias in langchain_openai;
    # the supported keyword is `model`.
    llm = ChatOpenAI(model="gpt-4")
    # LCEL pipe syntax replaces the legacy LLMChain wrapper.
    return prompt_template | llm
# Build the module-level chain so importers can use `from ... import chain`.
chain = create_chain()

if __name__ == "__main__":
    # Smoke test: only runs when executed as a script, never on import.
    reply = chain.invoke({"input": "What's your name?"})
    # invoke() returns an AIMessage; .content holds the text.
    print(reply.content)