import os
from dotenv import load_dotenv, dotenv_values
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings
# Get the directory of the current script
current_dir = os.path.dirname(os.path.abspath(__file__))
# Construct the path to the .env file in the same directory as rag.py
env_path = os.path.join(current_dir, '.env')
# Load environment variables from the .env file
load_dotenv(env_path)
# Also load the .env file contents as a dictionary for explicit lookups
env_vars = dotenv_values(env_path)
# LangSmith tracing is configured through environment variables; reading them
# here only makes the configuration explicit
LANGCHAIN_TRACING_V2 = os.getenv("LANGCHAIN_TRACING_V2", "false").lower() == "true"
LANGCHAIN_API_KEY = os.getenv("LANGCHAIN_API_KEY")
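# The .env file is expected to define the variables referenced in this script.
# A sketch of its layout (placeholder values; adjust to your own Azure resource):
#   AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
#   AZURE_OPENAI_API_KEY=...
#   AZURE_OPENAI_DEPLOYMENT_NAME=...
#   AZURE_OPENAI_API_VERSION=...
#   AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=...
#   AZURE_OPENAI_EMBEDDING_API_VERSION=...
#   LANGCHAIN_TRACING_V2=true   (optional, enables LangSmith tracing)
#   LANGCHAIN_API_KEY=...       (optional, needed only when tracing is on)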
# A small in-memory corpus of pet descriptions (in Chinese) to retrieve from
documents = [
    Document(
        page_content="狗是很好的伴侣,以忠诚和友好而闻名。",
        metadata={"source": "mammal-pets-doc"},
    ),
    Document(
        page_content="猫是独立的宠物,通常喜欢自己的空间。",
        metadata={"source": "mammal-pets-doc"},
    ),
    Document(
        page_content="金鱼是初学者喜欢的宠物,只需要相对简单的照顾。",
        metadata={"source": "fish-pets-doc"},
    ),
    Document(
        page_content="鹦鹉是聪明的鸟类,能够模仿人类说话。",
        metadata={"source": "bird-pets-doc"},
    ),
    Document(
        page_content="兔子是社交动物,需要大量空间来跳跃。",
        metadata={"source": "mammal-pets-doc"},
    ),
]
# Set up Azure OpenAI embeddings.
# Note: embedding-model quality has a major impact on retrieval results;
# make sure to use a high-quality embedding model that handles Chinese well.
embeddings = AzureOpenAIEmbeddings(
    azure_endpoint=env_vars.get("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=env_vars.get("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"),
    openai_api_version=env_vars.get("AZURE_OPENAI_EMBEDDING_API_VERSION"),
    api_key=env_vars.get("AZURE_OPENAI_API_KEY"),
)
# Embed the documents and index them in a Chroma vector store
vectorstore = Chroma.from_documents(
    documents,
    embedding=embeddings,
)
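# Optional sanity check: query the vector store directly to inspect raw
# similarity-search results before wiring up the chain. The query string here
# is just an illustrative example, not part of the original tutorial.
for doc, score in vectorstore.similarity_search_with_score("哪种宠物适合初学者?", k=2):
    print(score, doc.page_content)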
# Expose the vector store as a retriever that returns only the single most
# similar document (k=1)
retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 1},
)
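# The retriever returns a list of Document objects. With k=1 every answer is
# grounded in a single passage, so a retrieval miss directly hurts answer
# quality; raising k trades precision for recall. Illustrative query:
print(retriever.invoke("哪种鸟会说话?"))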
# Set up the Azure OpenAI chat model used for answer generation
model = AzureChatOpenAI(
    model_name="gpt-4o",
    azure_endpoint=env_vars.get("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=env_vars.get("AZURE_OPENAI_DEPLOYMENT_NAME"),
    openai_api_version=env_vars.get("AZURE_OPENAI_API_VERSION"),
    api_key=env_vars.get("AZURE_OPENAI_API_KEY"),
)
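# Optional smoke test (not in the original tutorial): chat models return a
# message object whose .content holds the text, which rag_chain() below relies on
print(model.invoke("用一句话介绍你自己。").content)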
# Prompt template, kept in Chinese to match the corpus; it reads:
# "Answer the question based on the provided context. Question: ... Context: ... Answer:"
template = """
根据提供的上下文回答这个问题。
问题: {question}
上下文:
{context}
回答:
"""
prompt = ChatPromptTemplate.from_template(template)
def rag_chain(question: str) -> str:
    # Retrieve the documents most relevant to the question
    retrieved_docs = retriever.invoke(question)
    # Join the retrieved documents into a single context string
    context = "\n".join(doc.page_content for doc in retrieved_docs)
    # Fill the prompt template with the question and the retrieved context
    formatted_prompt = prompt.format(question=question, context=context)
    # Get the model's response
    response = model.invoke(formatted_prompt)
    return response.content
# Example usage: "What is the best pet?"
response = rag_chain("最好的宠物是什么?")
print(response)
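# The same pipeline can also be written declaratively with LCEL (LangChain
# Expression Language). A minimal sketch, assuming the retriever, prompt, and
# model objects defined above; behavior matches rag_chain().
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

def format_docs(docs):
    # Join retrieved Documents into one context string, as rag_chain() does
    return "\n".join(doc.page_content for doc in docs)

lcel_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
print(lcel_chain.invoke("最好的宠物是什么?"))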