Shreyas094 committed on
Commit
570a345
·
verified ·
1 Parent(s): 2a4005c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -42
app.py CHANGED
@@ -2,13 +2,9 @@ import gradio as gr
2
  from duckduckgo_search import DDGS
3
  from typing import List, Dict
4
  import os
5
- from langchain.llms import HuggingFaceHub
6
- from langchain.chains import ConversationChain
7
- from langchain.memory import ConversationBufferMemory
8
- from langchain.prompts import PromptTemplate
9
 
10
  # Environment variables and configurations
11
- os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.environ.get("HUGGINGFACE_TOKEN") # Replace with your actual HuggingFace API token
12
 
13
  MODELS = [
14
  "mistralai/Mistral-7B-Instruct-v0.3",
@@ -26,7 +22,7 @@ def get_web_search_results(query: str, max_results: int = 10) -> List[Dict[str,
26
  print(f"An error occurred during web search: {str(e)}")
27
  return [{"error": f"An error occurred during web search: {str(e)}"}]
28
 
29
- def summarize_results(query: str, search_results: List[Dict[str, str]]) -> str:
30
  try:
31
  context = "\n\n".join([f"Title: {result['title']}\nContent: {result['body']}" for result in search_results])
32
 
@@ -44,44 +40,21 @@ def summarize_results(query: str, search_results: List[Dict[str, str]]) -> str:
44
  except Exception as e:
45
  return f"An error occurred during summarization: {str(e)}"
46
 
47
- # Initialize LangChain components
48
- llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.3", model_kwargs={"temperature": 0.7, "max_length": 512})
49
-
50
- template = """You are an AI assistant that helps with web searches and summarizes information.
51
- Use the provided web search results to answer questions.
52
-
53
- Current conversation:
54
- {history}
55
- Human: {input}
56
- AI Assistant:"""
57
-
58
- prompt = PromptTemplate(input_variables=["history", "input"], template=template)
59
-
60
- conversation = ConversationChain(
61
- llm=llm,
62
- memory=ConversationBufferMemory(ai_prefix="AI Assistant"),
63
- prompt=prompt,
64
- verbose=True
65
- )
66
-
67
  def respond(message, chat_history, model, temperature, num_api_calls):
68
- search_results = get_web_search_results(message)
69
-
70
- if not search_results:
71
- response = f"No search results found for the query: {message}"
72
- elif "error" in search_results[0]:
73
- response = search_results[0]["error"]
74
- else:
75
- summary = summarize_results(message, search_results)
76
-
77
- # Use LangChain to generate a response based on the summary
78
- langchain_response = conversation.predict(input=f"Based on this summary, answer the user's question: {summary}\n\nUser's question: {message}")
79
 
80
- response = langchain_response
 
 
 
 
 
 
81
 
82
- return response
83
 
84
- # The rest of your Gradio setup remains the same
85
  css = """
86
  Your custom CSS here
87
  """
@@ -110,7 +83,7 @@ theme = gr.themes.Soft(
110
  demo = gr.ChatInterface(
111
  respond,
112
  additional_inputs=[
113
- gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0]),
114
  gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
115
  gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")
116
  ],
@@ -134,4 +107,4 @@ demo = gr.ChatInterface(
134
  )
135
  )
136
 
137
- demo.launch()
 
2
  from duckduckgo_search import DDGS
3
  from typing import List, Dict
4
  import os
 
 
 
 
5
 
6
  # Environment variables and configurations
7
+ huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
8
 
9
  MODELS = [
10
  "mistralai/Mistral-7B-Instruct-v0.3",
 
22
  print(f"An error occurred during web search: {str(e)}")
23
  return [{"error": f"An error occurred during web search: {str(e)}"}]
24
 
25
+ def summarize_results(query: str, search_results: List[Dict[str, str]], model: str) -> str:
26
  try:
27
  context = "\n\n".join([f"Title: {result['title']}\nContent: {result['body']}" for result in search_results])
28
 
 
40
  except Exception as e:
41
  return f"An error occurred during summarization: {str(e)}"
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
def respond(message, chat_history, model, temperature, num_api_calls):
    """Chat handler: run the web-search pipeline once per requested API call.

    For each of ``num_api_calls`` rounds, fetch web search results for the
    user's message and summarize them with the selected model; the per-round
    texts are concatenated into one reply. ``chat_history`` and
    ``temperature`` are accepted for the Gradio ChatInterface signature but
    are not used here.
    """
    pieces = []
    for _ in range(num_api_calls):
        search_results = get_web_search_results(message)

        if not search_results:
            pieces.append(f"No search results found for the query: {message}\n\n")
        elif "error" in search_results[0]:
            # Search helper reports failures as a single {"error": ...} entry.
            pieces.append(search_results[0]["error"] + "\n\n")
        else:
            pieces.append(summarize_results(message, search_results, model) + "\n\n")

    final_summary = "".join(pieces)
    return final_summary if final_summary else "Unable to generate a response. Please try a different query."
57
 
 
58
  css = """
59
  Your custom CSS here
60
  """
 
83
  demo = gr.ChatInterface(
84
  respond,
85
  additional_inputs=[
86
+ gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2]),
87
  gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
88
  gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")
89
  ],
 
107
  )
108
  )
109
 
110
+ demo.launch()