import os
import config

# Import the Ollama chat client only when a local model is configured.
if os.environ.get("LOCALLLM"):
    from ollama import chat as OllamaChat
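
# The `config` module is not shown here. Judging from how it is used below, it
# presumably looks something like this sketch (the URLs and model id are
# assumptions, not the actual values):
#
#   # config.py (hypothetical)
#   wikipediaSearchURL   = ("https://en.wikipedia.org/w/api.php"
#                           "?action=query&generator=search&format=json&gsrsearch=")
#   wikipediaRetrieveURL = "https://en.wikipedia.org/wiki/"
#   hfModel = "HuggingFaceH4/zephyr-7b-beta"  # any chat-capable model id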
#################################################
from langchain_community.tools import DuckDuckGoSearchRun

def callWebSearch(query):
    return DuckDuckGo(query)

def DuckDuckGo(query):
    search_tool = DuckDuckGoSearchRun()
    results = search_tool.invoke(query)
    return results
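
# Note: DuckDuckGoSearchRun is a thin LangChain wrapper around the
# `duckduckgo-search` package, which must be installed separately.
# Example (hypothetical query):
#   print(callWebSearch("current president of France"))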
#################################################
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
import requests
from bs4 import BeautifulSoup

def callWikipediaSearch(query):
    return callWikipediaLangchain(query)

def callWikipediaLangchain(query):
    wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
    response = wikipedia.run(query)
    return response
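
# Note: WikipediaQueryRun / WikipediaAPIWrapper rely on the `wikipedia`
# package (`pip install wikipedia`) and return summaries of the top matches.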

def callCustomWikipediaSearch(query):
    searchURL = config.wikipediaSearchURL + query
    response = requests.get(searchURL, timeout=60)
    response.raise_for_status()
    searchResult = response.json()
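    # The loop below assumes a MediaWiki search-style response; the exact shape
    # depends on config.wikipediaSearchURL. Roughly (a sketch):
    #   {"query": {"pages": {"<pageId>": {"title": "...", "index": 1, ...}}}}
    # where "index" is the page's rank within the search results.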
    for pageId in searchResult['query']['pages']:
        # Take the top-ranked hit and fetch its page as raw HTML.
        if searchResult['query']['pages'][pageId]['index'] == 1:
            page = searchResult['query']['pages'][pageId]['title']
            # Alternative: fetch parser-rendered HTML via the MediaWiki parse API.
            # response3 = requests.get('https://en.wikipedia.org/w/api.php',
            #                          params={'action': 'parse', 'page': page, 'format': 'json'}).json()
            # raw_html3 = response3['parse']['text']['*']
            response2 = requests.get(config.wikipediaRetrieveURL + page, timeout=60)
            response2.raise_for_status()
            raw_html = response2.text
            # Strip the markup and return the article as plain text.
            soup = BeautifulSoup(raw_html, 'html.parser')
            raw_text = soup.get_text(separator=" ", strip=True)
            return raw_text
            # Alternative: request a plain-text intro extract from the query API.
            # response2 = requests.get('https://en.wikipedia.org/w/api.php',
            #                          params={'action': 'query', 'format': 'json', 'titles': page,
            #                                  'prop': 'extracts', 'exintro': True, 'explaintext': True})
            # searchResult2 = response2.json()
            # for pageId2 in searchResult2['query']['pages']:
            #     if searchResult2['query']['pages'][pageId2]['index'] == 1:
            #         return searchResult2['query']['pages'][pageId2]['extract']
    return "No result found on Wikipedia. Search elsewhere!"
#################################################
from huggingface_hub import InferenceClient

def callLLM(query):
    # Route to a local Ollama model when LOCALLLM is set, otherwise to the
    # Hugging Face Inference API.
    if os.environ.get("LOCALLLM"):
        return callLocalLLM(query)
    else:
        return callHfInferenceClientLLM(query)
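
# Assumes an Ollama server is running locally and that the model named in
# LOCALLLM has already been pulled (e.g. `ollama pull llama3.2`).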
def callLocalLLM(query):
    response = OllamaChat(model=os.environ["LOCALLLM"],
                          messages=[{'role': 'user', 'content': query}])
    return response['message']['content']
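
# Note: InferenceClient picks up an API token from the HF_TOKEN environment
# variable (or an explicit `token=` argument); calls to gated models may fail
# without it.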
def callHfInferenceClientLLM(query):
    client = InferenceClient(config.hfModel)
    response = client.chat.completions.create(
        messages=[{"role": "user", "content": query}],
        stream=False, max_tokens=1024)
    return response.choices[0].message.content
#################################################
if __name__ == "__main__":
    os.environ["LOCALLLM"] = "llama3.2"
    # LOCALLLM was not set when the module-level conditional import ran, so
    # uncomment this import before testing callLLM against the local model:
    # from ollama import chat as OllamaChat
    # response = callLLM("What is the capital of France?")
    # print(response)
    # response = callWebSearch("who is the president of France")
    # print(response)
    # response = callHfInferenceClientLLM("What is the capital of France?")
    # print(response)
    print(callWikipediaSearch("Mercedes Sosa discography"))