model.invoke(
    [
        HumanMessage(content="Hi! I'm Bob"),
        AIMessage(content="Hello Bob! How can I assist you today?"),
        HumanMessage(content="What's my name?"),
    ]
)
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
# Define a new graph
workflow = StateGraph(state_schema=MessagesState)
# Define the function that calls the model
def call_model(state: MessagesState):
    response = model.invoke(state["messages"])
    return {"messages": response}
# Define the (single) node in the graph
workflow.add_edge(START, "model")
workflow.add_node("model", call_model)
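To give the chatbot memory across turns, the graph can be compiled with the checkpointer imported above; the compiled object is what the later examples call app. A minimal sketch:

# Add in-memory persistence and compile the graph into a runnable app
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)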
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt_template = ChatPromptTemplate.from_messages( [ ( "system", "You talk like a pirate. Answer all questions to the best of your ability.", ), MessagesPlaceholder(variable_name="messages"), ] )
prompt_template = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant. Answer all questions to the best of your ability in {language}.", ), MessagesPlaceholder(variable_name="messages"), ] )
config = {"configurable": {"thread_id": "abc789"}} query = "Hi I'm Todd, please tell me a joke." language = "English"
input_messages = [HumanMessage(query)]
for chunk, metadata in app.stream(
    {"messages": input_messages, "language": language},
    config,
    stream_mode="messages",
):
    if isinstance(chunk, AIMessage):  # Filter to just model responses
        print(chunk.content, end="|")
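For comparison, the same turn can be run without token streaming by invoking the compiled app directly; a short sketch, assuming the same app and config as above:

# Non-streaming equivalent: wait for the full response, then print it
output = app.invoke({"messages": input_messages, "language": language}, config)
output["messages"][-1].pretty_print()  # the last message in state is the reply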