LangChain · Chain · Agent · Python
LangChain 集成指南
在 LangChain 中使用 ChinaWHAPI 模型,覆盖 Chain、Agent、Tool、Memory 全组件。
基础配置
LangChain 的 ChatOpenAI 类支持自定义 baseURL,接入 ChinaWHAPI 只需修改参数。
# Import the OpenAI-compatible chat model wrapper plus the message types
# used elsewhere in this guide.
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
# Point the standard ChatOpenAI client at the ChinaWHAPI endpoint: only the
# API key and the base URL change, everything else is stock LangChain.
llm = ChatOpenAI(
model="qwen3.6-plus",
openai_api_key="your_chinawhapi_key",
openai_api_base="https://chinawhapi.com/v1",
temperature=0.7,
)
Simple Chain
用 LLMChain 构建最简单的问答 Chain。
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Build the prompt from a plain template string; from_template infers the
# single {question} input variable automatically.
qa_template = "请用简洁的方式回答:{question}"
qa_prompt = PromptTemplate.from_template(qa_template)

# Wire the ChinaWHAPI-backed llm (defined above) and the prompt into the
# smallest possible question-answering chain, then run one query.
chain = LLMChain(llm=llm, prompt=qa_prompt)
response = chain.run("什么是 RAG?")
print(response)
Conversation Chain(多轮对话)
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# A fresh ChatOpenAI client pointed at the ChinaWHAPI-compatible endpoint,
# identical to the basic configuration above.
llm = ChatOpenAI(
    model="qwen3.6-plus",
    openai_api_key="your_chinawhapi_key",
    openai_api_base="https://chinawhapi.com/v1",
    temperature=0.7,
)

# ConversationBufferMemory keeps the full turn history, so the follow-up
# question can refer back to facts stated in the first turn.
conversation = ConversationChain(llm=llm, memory=ConversationBufferMemory())
print(conversation.predict(input="我叫小明"))
print(conversation.predict(input="我叫什么名字?"))
ReAct Agent
用 LangChain Agents 构建自主规划型 AI Agent,支持工具调用和推理循环。
from langchain.agents import initialize_agent, Tool
from langchain.tools import WikipediaQueryRun
from langchain.utilities import SerpAPIWrapper, WikipediaAPIWrapper

# Each Tool wraps a callable the agent may invoke during its ReAct loop.
# NOTE: WikipediaQueryRun requires an explicit api_wrapper instance —
# constructing WikipediaQueryRun() with no arguments raises a pydantic
# validation error, so we pass WikipediaAPIWrapper() here.
tools = [
    Tool(name="Search", func=SerpAPIWrapper().run, description="搜索网络信息"),
    Tool(
        name="Wikipedia",
        func=WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()).run,
        description="查询维基百科",
    ),
]

# "zero-shot-react-description": the agent chooses tools purely from their
# description strings, reasoning step by step; verbose=True prints the
# intermediate thought/action/observation trace.
agent = initialize_agent(
    tools, llm,
    agent="zero-shot-react-description",
    verbose=True,
)
agent.run("量子计算的基本原理是什么?")
RAG with LangChain
用 LangChain 的 RetrievalQA 链结合 ChinaWHAPI 模型构建知识库问答。
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader

# Chat model served through the ChinaWHAPI-compatible endpoint.
# (ChatOpenAI was imported but never instantiated here, leaving `llm`
# undefined unless an earlier example had run; define it so this snippet
# is self-contained like the others.)
llm = ChatOpenAI(
    model="qwen3.6-plus",
    openai_api_key="your_chinawhapi_key",
    openai_api_base="https://chinawhapi.com/v1",
    temperature=0.7,
)

# Load the source document and split it into overlapping chunks so each
# chunk stays small enough to embed and retrieve individually.
loader = TextLoader("./docs.txt")
documents = loader.load()
splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
texts = splitter.split_documents(documents)

# Embed the chunks through the same ChinaWHAPI endpoint and index them
# in a Chroma vector store.
vectorstore = Chroma.from_documents(texts, OpenAIEmbeddings(
    openai_api_base="https://chinawhapi.com/v1",
    openai_api_key="your_chinawhapi_key",
))

# Build the RAG QA chain. chain_type="stuff" concatenates all retrieved
# chunks into a single prompt — simple, but bounded by context length.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)
result = qa.run("这份文档的核心观点是什么?")
多模型路由 Agent
根据任务类型自动选择最优模型,在 LangChain 中实现成本优化。
def get_llm_by_task(task_type: str):
    """Return the most cost-effective ChinaWHAPI model for a task type.

    Args:
        task_type: "reasoning" for deep-reasoning tasks, "code" for code
            generation; anything else falls back to a fast general model.

    Returns:
        A ChatOpenAI client configured for the selected model.
    """
    # The original snippet used openai_api_base="..." placeholders and
    # omitted the API key entirely; fill both in so the example runs,
    # matching the configuration used throughout this guide.
    base = "https://chinawhapi.com/v1"
    key = "your_chinawhapi_key"
    if task_type == "reasoning":
        # Reasoning-tuned model; low temperature for more deterministic steps.
        return ChatOpenAI(model="deepseek-r1", openai_api_key=key,
                          openai_api_base=base, temperature=0.3)
    if task_type == "code":
        return ChatOpenAI(model="qwen3-coder-plus", openai_api_key=key,
                          openai_api_base=base, temperature=0.3)
    # Default: fast, cheap general-purpose chat model.
    return ChatOpenAI(model="qwen3.5-flash", openai_api_key=key,
                      openai_api_base=base, temperature=0.7)


llm = get_llm_by_task("code")