import os
from dotenv import load_dotenv
from langchain_openai import AzureChatOpenAI
from langchain_core.messages import HumanMessage

# Load environment variables and configure the Azure OpenAI chat model
load_dotenv()
model = AzureChatOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    openai_api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
)

# First turn: introduce ourselves to the model
message = HumanMessage(content="I am Bob")
response = model.invoke([message])
print("Model's response:")
print(response.content)

# Second turn: a fresh invoke call carries no history, so the model has no memory of the first turn
message = HumanMessage(content="What's my name?")
response = model.invoke([message])
print("Model's response:")
print(response.content)
Model's response:
Hello Bob! It's nice to meet you. Is there anything I can help you with today?
Model's response:
I apologize, but I don't have any prior context or information about your name. Each interaction with me starts fresh, and I don't retain information from previous conversations. If you'd like me to know your name, you'll need to tell me in this current conversation. So, may I ask what your name is?
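As the second response shows, each model.invoke call is stateless: nothing from the first turn reaches the second. The most basic workaround, before bringing in LangGraph, is to keep the message history yourself and resend it on every call. The sketch below is not part of the original example; it assumes the model object configured above and only illustrates the pattern.

from langchain_core.messages import HumanMessage

# Minimal sketch: maintain the history manually and pass it back in on every turn.
# `model` is assumed to be the AzureChatOpenAI instance configured above.
history = [HumanMessage(content="I am Bob")]
reply = model.invoke(history)
history.append(reply)  # invoke returns an AIMessage; keep it in the history

history.append(HumanMessage(content="What's my name?"))
reply = model.invoke(history)
print(reply.content)  # with the full history in the prompt, the model can now answer "Bob"

This works, but the calling code has to carry the history around itself; the next example hands that bookkeeping to LangGraph.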
import os
from dotenv import load_dotenv
from langchain_openai import AzureChatOpenAI
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph

# Load environment variables and configure the Azure OpenAI chat model
load_dotenv()
model = AzureChatOpenAI(
    model_name="gpt-4",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    openai_api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
)

# Build the conversation graph: a single "model" node that appends the model's reply to the message state
workflow = StateGraph(state_schema=MessagesState)

def call_model(state: MessagesState):
    response = model.invoke(state["messages"])
    return {"messages": response}

workflow.add_edge(START, "model")
workflow.add_node("model", call_model)

# Compile the graph with an in-memory checkpointer so each thread's history persists across invocations
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)

# Run the conversation; the thread_id keys the stored history
config = {"configurable": {"thread_id": "tom"}}

# First turn
query = "Hi! I'm Bob."
input_messages = [HumanMessage(query)]
output = app.invoke({"messages": input_messages}, config)
output["messages"][-1].pretty_print()

# Second turn: the checkpointer restores the earlier messages, so the model can recall the name
query = "What's my name?"
input_messages = [HumanMessage(query)]
output = app.invoke({"messages": input_messages}, config)
output["messages"][-1].pretty_print()
Human: Hi! I'm Bob.
AI: Hello Bob! It's nice to meet you. How can I assist you today?
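The stored history is keyed by the thread_id in config, so a different thread starts from an empty state. A small follow-up sketch, reusing the app compiled above with a hypothetical thread id "jerry", shows that isolation:

# Same compiled app as above; only the thread_id changes (hypothetical id "jerry").
other_config = {"configurable": {"thread_id": "jerry"}}
output = app.invoke({"messages": [HumanMessage("What's my name?")]}, other_config)
output["messages"][-1].pretty_print()
# Expected: the model cannot recall the name, because the "jerry" thread has no prior messages.

Switching back to {"thread_id": "tom"} would again restore Bob's earlier conversation from the MemorySaver checkpointer.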