# 关于一些搜索的 langchain 实践 (LangChain practice notes for search)
# langchain 调用本地大模型 (LangChain calling a locally served LLM)
# from langchain_community.llms import Ollama
# llm = Ollama(base_url="http://192.168.1.138:11434", model="llama3.1")
# response = llm.invoke("The first man on the moon was …")
# print(response)
# langchain 调用本地大模型 + 维基百科搜索接口 (local LLM + Wikipedia search tool)
# from langchain.agents import load_tools
# from langchain.agents import initialize_agent
# from langchain.agents import AgentType
# from langchain_community.chat_models import ChatOpenAI
# from langchain_community.llms import Ollama
# llm = Ollama(base_url="http://192.168.1.138:11434", model="llama3.1")
# tools = load_tools(["wikipedia", "llm-math"], llm=llm)
# agent = initialize_agent(tools,
# llm,
# agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# verbose=True,
# handle_parsing_errors=True)
# result=agent.run("关于巴黎奥运会最近有什么新闻吗?")
# print(result)
# 多个搜索引擎结合 langchain (combining multiple search engines with LangChain)
import os
from langchain_community.llms import Ollama
from aiohttp.client import request
from tempfile import template
from langchain.chains import LLMChain
from langchain.chains import LLMRequestsChain
from langchain.prompts import PromptTemplate
# Search-extraction prompt: LLMRequestsChain injects the raw page text as
# {requests_result}; the LLM must extract the answer to {query} from it,
# or reply "找不到" ("not found") when the page has no relevant info.
# Fixes over the original prompt text: "直接" -> "之间" (between),
# "的化" -> "的话" (if), "anser" -> "answer" — typos that could confuse
# the model about the expected output format.
# NOTE(review): this assignment rebinds the name `template` imported from
# tempfile above; that import (and the aiohttp one) appears to be an
# accidental IDE auto-import and is never used.
template = '''在>>> 和 <<<之间是来自google的原始搜索结果。
请把对于问题'{query}'的答案从里面提取出来,如果里面没有相关信息的话就说“找不到”
请使用以下格式:
Extracted:<answer or "找不到">
>>> {requests_result} <<<
Extracted:
'''

# Local model served by Ollama on the LAN; it reads the fetched search page
# and performs the extraction described in the template.
llm = Ollama(base_url="http://192.168.1.138:11434", model="llama3.1")

# Prompt object wiring the two variables the requests chain will supply.
PROMPT = PromptTemplate(
    input_variables=["query", "requests_result"],
    template=template,
)
# Wire the pipeline: LLMRequestsChain fetches the URL, then hands the page
# text to the inner LLMChain, which extracts the answer using PROMPT.
extraction_chain = LLMChain(llm=llm, prompt=PROMPT)
request_chain = LLMRequestsChain(llm_chain=extraction_chain)

question = "今天是什么日子"

# Inputs for the chain: the question itself plus the search-engine URL to
# fetch. Spaces are turned into '+' for the query string.
inputs = {
    "query": question,
    "url": "https://www.google.com/search?q=" + question.replace(" ", "+"),
    # Alternative engines (same pattern):
    # "url": "https://www.baidu.com/s?wd=" + question.replace(" ", "+")
    # "url": "https://cn.bing.com/search?q=" + question.replace(" ", "+")
    # https://cn.bing.com/search?q=今天北京市天气
}

# Run the chain: fetch the search results, then let the local LLM extract
# the answer from them.
result = request_chain(inputs)
print("result['output']", result['output'])