diff --git a/libs/langchain/langchain/chat_models/anthropic.py b/libs/langchain/langchain/chat_models/anthropic.py
index 344f4f0b2902f..24d1d7936c0fe 100644
--- a/libs/langchain/langchain/chat_models/anthropic.py
+++ b/libs/langchain/langchain/chat_models/anthropic.py
@@ -34,7 +34,7 @@ def _convert_one_message_to_text(
     elif isinstance(message, AIMessage):
         message_text = f"{ai_prompt} {message.content}"
     elif isinstance(message, SystemMessage):
-        message_text = f"{human_prompt} {message.content}"
+        message_text = message.content
     else:
         raise ValueError(f"Got unknown type {message}")
     return message_text
@@ -56,7 +56,6 @@ def convert_messages_to_prompt_anthropic(
     """
     messages = messages.copy()  # don't mutate the original list
-
     if not isinstance(messages[-1], AIMessage):
         messages.append(AIMessage(content=""))
diff --git a/libs/langchain/langchain/llms/bedrock.py b/libs/langchain/langchain/llms/bedrock.py
index 8bc1472633359..6a0f355b34795 100644
--- a/libs/langchain/langchain/llms/bedrock.py
+++ b/libs/langchain/langchain/llms/bedrock.py
@@ -42,12 +42,12 @@ def _human_assistant_format(input_text: str) -> str:
             if count % 2 == 0:
                 count += 1
             else:
-                raise ValueError(ALTERNATION_ERROR)
+                raise ValueError(ALTERNATION_ERROR + f" Received {input_text}")
         if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
             if count % 2 == 1:
                 count += 1
             else:
-                raise ValueError(ALTERNATION_ERROR)
+                raise ValueError(ALTERNATION_ERROR + f" Received {input_text}")
     if count % 2 == 1:  # Only saw Human, no Assistant
         input_text = input_text + ASSISTANT_PROMPT  # SILENT CORRECTION
diff --git a/libs/langchain/tests/unit_tests/chat_models/test_anthropic.py b/libs/langchain/tests/unit_tests/chat_models/test_anthropic.py
index c60da340a075c..d49a3f225d44b 100644
--- a/libs/langchain/tests/unit_tests/chat_models/test_anthropic.py
+++ b/libs/langchain/tests/unit_tests/chat_models/test_anthropic.py
@@ -6,7 +6,7 @@
 
 from langchain.chat_models import ChatAnthropic
 from langchain.chat_models.anthropic import convert_messages_to_prompt_anthropic
-from langchain.schema import AIMessage, BaseMessage, HumanMessage
+from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
 
 os.environ["ANTHROPIC_API_KEY"] = "foo"
 
@@ -50,11 +50,24 @@ def test_anthropic_initialization() -> None:
     ChatAnthropic(model="test", anthropic_api_key="test")
 
 
-def test_formatting() -> None:
-    messages: List[BaseMessage] = [HumanMessage(content="Hello")]
+@pytest.mark.parametrize(
+    ("messages", "expected"),
+    [
+        ([HumanMessage(content="Hello")], "\n\nHuman: Hello\n\nAssistant:"),
+        (
+            [HumanMessage(content="Hello"), AIMessage(content="Answer:")],
+            "\n\nHuman: Hello\n\nAssistant: Answer:",
+        ),
+        (
+            [
+                SystemMessage(content="You're an assistant"),
+                HumanMessage(content="Hello"),
+                AIMessage(content="Answer:"),
+            ],
+            "You're an assistant\n\nHuman: Hello\n\nAssistant: Answer:",
+        ),
+    ],
+)
+def test_formatting(messages: List[BaseMessage], expected: str) -> None:
     result = convert_messages_to_prompt_anthropic(messages)
-    assert result == "\n\nHuman: Hello\n\nAssistant:"
-
-    messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
-    result = convert_messages_to_prompt_anthropic(messages)
-    assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"
+    assert result == expected