当某个模型出问题时,回退(fallback)到正常的模型,示例代码如下

from langchain_community.llms.ollama import Ollama
from langchain_community.chat_models import ChatOllama
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Prompt for the (deliberately broken) chat-model chain.
chat_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You're a nice assistant who always includes a compliment in your response",
        ),
        ("human", "Why did the {animal} cross the road"),
    ]
)

# Build a chain that is guaranteed to fail at invoke time by pointing it at a
# model name that does not exist on the Ollama server.
# NOTE: the keyword is `model`, not `model_name` — `model_name` is not a
# ChatOllama field, so the original code failed at construction instead of
# demonstrating a runtime fallback.
chat_model = ChatOllama(model="gpt-fake")
bad_chain = chat_prompt | chat_model | StrOutputParser()


prompt_template = """Instructions: You should always include a compliment in your response.

Question: Why did the {animal} cross the road?"""
prompt = PromptTemplate.from_template(prompt_template)

# Build a chain that is known to work (a model actually installed locally).
llm = Ollama(model="llama2-chinese:13b")
good_chain = prompt | llm

# Finally, use with_fallbacks to wire up the error-fallback mechanism:
# if bad_chain raises, the runnable transparently retries with good_chain.
chain = bad_chain.with_fallbacks([good_chain])
result = chain.invoke({"animal": "turtle"})
print(result)

Logo

一站式 AI 云服务平台

更多推荐