from langchain_core.messages import AIMessage, HumanMessage from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# Build a chat prompt: one fixed system instruction, followed by the running
# conversation injected through the "messages" placeholder.
system_message = (
    "system",
    "You are a helpful assistant. Answer all questions to the best of your ability.",
)
prompt = ChatPromptTemplate.from_messages(
    [system_message, MessagesPlaceholder(variable_name="messages")]
)

# Pipe the prompt into the chat model (`chat` is defined earlier in the
# tutorial) to form a runnable chain.
chain = prompt | chat

# Invoke the chain with a manually constructed three-turn history.
conversation = [
    HumanMessage(
        content="Translate this sentence from English to French: I love programming."
    ),
    AIMessage(content="J'adore la programmation."),
    HumanMessage(content="What did you just say?"),
]
response = chain.invoke({"messages": conversation})
response
1
AIMessage(content='I said "J\'adore la programmation" which is the French translation for "I love programming". \n', response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-88045024-xxx-xxx-xxx-35fe7f321bbe-0')
# In-memory chat history pre-seeded with a single human/AI exchange
# (the translation example from above).
demo_ephemeral_chat_history = ChatMessageHistory()
demo_ephemeral_chat_history.add_user_message(
    "Translate this sentence from English to French: I love programming."
)
demo_ephemeral_chat_history.add_ai_message("J'adore la programmation.")

# Display the stored messages.
demo_ephemeral_chat_history
1
InMemoryChatMessageHistory(messages=[HumanMessage(content='Translate this sentence from English to French: I love programming.'), AIMessage(content="J'adore la programmation.")])
在这个示例中,你可以注意到 load_memory_variables 返回了一个名为 history 的键值。这意味着你的链(以及可能的输入提示词)可能会期望一个名为 history 的输入。一般而言,你可以通过在记忆类中设置参数来管理这个变量。例如,如果你希望记忆变量在 chat_history 关键字中返回,你可以这样做:
> Entering new ConversationChain chain… Prompt after formatting: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there! AI:
> Finished chain. 'Hello! 👋 How can I help you today? 😊 \n'
Conversation Buffer Window
ConversationBufferWindowMemory 跟踪并保存随时间发展的对话互动列表。它只保留最近的 K 次对话记录。这种做法有助于创建一个包含最新互动记录的滑动视窗,可以有效地避免缓存变得过大。
1 2 3 4 5 6 7
from langchain.memory import ConversationBufferWindowMemory
from langchain.memory import ConversationEntityMemory
# Entity memory uses the LLM to pull named entities out of the conversation
# and keeps a per-entity summary store.
memory = ConversationEntityMemory(llm=llm)

user_turn = {"input": "Deven & Sam are working on a hackathon project"}
memory.load_memory_variables(user_turn)
memory

# Record the AI's reply for the same turn, then inspect the memory again.
ai_turn = {
    "output": " That sounds like a great project! What kind of project are they working on?"
}
memory.save_context(user_turn, ai_turn)
memory
1 2 3 4 5 6 7
ConversationEntityMemory(chat_memory=InMemoryChatMessageHistory(messages=[HumanMessage(content='Deven & Sam are working on a hackathon project'), AIMessage(content=' That sounds like a great project! What kind of project are they working on?')]), llm=GoogleGenerativeAI(model='gemini-1.5-pro-latest', google_api_key=SecretStr('**********'), client=genai.GenerativeModel( model_name='models/gemini-1.5-pro-latest', generation_config={}, safety_settings={}, tools=None, system_instruction=None, )), entity_cache=['Deven', 'Sam'], entity_store=InMemoryEntityStore(store={'Deven': 'Updated summary: Deven is working on a hackathon project with Sam.', 'Sam': 'Updated summary: Sam is working on a hackathon project with Deven.'}))
1
memory.load_memory_variables({"input": 'who is Sam'})
1 2
{'history': 'Human: Deven & Sam are working on a hackathon project\nAI: That sounds like a great project! What kind of project are they working on?', 'entities': {'Sam': 'Updated summary: Sam is working on a hackathon project with Deven.'}}
_input = {"input": "Deven & Sam are working on a hackathon project"} memory.load_memory_variables(_input) memory.save_context( _input, {"output": " That sounds like a great project! What kind of project are they working on?"} ) memory.load_memory_variables({"input": 'who is Sam'})
1 2 3 4
WARNING:langchain_core.language_models.llms:Retrying langchain_google_genai.llms._completion_with_retry.<locals>._completion_with_retry in 10.0 seconds as it raised ResourceExhausted: 429 Resource has been exhausted (e.g. check quota).. {'history': [HumanMessage(content='Deven & Sam are working on a hackathon project'), AIMessage(content=' That sounds like a great project! What kind of project are they working on?')], 'entities': {'Sam': 'Updated summary:\nSam is working on a hackathon project with Deven.'}}
Conversation Knowledge Graph
这种类型的记忆使用知识图谱来重建记忆。
1 2 3 4 5 6 7
from langchain.memory import ConversationKGMemory
# Knowledge-graph memory: each saved exchange is mined (via the LLM) for
# knowledge triples that can be queried later.
memory = ConversationKGMemory(llm=llm)

exchanges = [
    ({"input": "say hi to sam"}, {"output": "who is sam"}),
    ({"input": "sam is a friend"}, {"output": "okay"}),
]
for human, ai in exchanges:
    memory.save_context(human, ai)

# Ask the graph what it knows about "sam".
memory.load_memory_variables({"input": "who is sam"})
ConversationSummaryMemory(llm=GoogleGenerativeAI(model='gemini-1.5-pro-latest', google_api_key=SecretStr('**********'), client=genai.GenerativeModel( model_name='models/gemini-1.5-pro-latest', generation_config={}, safety_settings={}, tools=None, system_instruction=None, )), chat_memory=InMemoryChatMessageHistory(messages=[HumanMessage(content='hi'), AIMessage(content='hi there!')]), return_messages=True, buffer='Current summary: \nThe human greeted the AI. The AI returned the greeting. \n')
1
memory.buffer
'Current summary: \nThe human greeted the AI. The AI returned the greeting. \n'
你可以使用之前生成的总结来加速初始化,并通过直接初始化来避免重新生成总结。
1 2 3 4 5 6 7
memory = ConversationSummaryMemory( llm=llm, buffer="The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.", chat_memory=history, return_messages=True ) memory
1 2 3 4 5 6 7
ConversationSummaryMemory(llm=GoogleGenerativeAI(model='gemini-1.5-pro-latest', google_api_key=SecretStr('**********'), client=genai.GenerativeModel( model_name='models/gemini-1.5-pro-latest', generation_config={}, safety_settings={}, tools=None, system_instruction=None, )), chat_memory=InMemoryChatMessageHistory(messages=[HumanMessage(content='hi'), AIMessage(content='hi there!')]), return_messages=True, buffer='The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.')