Update app.py

#1
by deleted - opened
Files changed (1)
  1. app.py +70 -123
app.py CHANGED
@@ -1,141 +1,88 @@
1
  import gradio as gr
2
  import requests
3
- from html import escape
4
- from typing import List, Tuple
 
5
 
6
# Endpoints of the backend OpenAI-compatible API.
API_URL = "https://aham2api-3.onrender.com/v1/chat/completions"  # Update with your API URL
MODELS_URL = "https://aham2api-3.onrender.com/v1/models"  # Update with your API URL

# Stylesheet handed to gr.Blocks; currently just a placeholder.
css = """
/* Your existing CSS remains the same */
"""
 
 
 
14
 
15
def get_available_models() -> List[str]:
    """Return the sorted model ids exposed by the API.

    Falls back to a hard-coded trio of known-good model names when the
    endpoint is unreachable or the payload is malformed.
    """
    try:
        resp = requests.get(MODELS_URL, timeout=30)
        resp.raise_for_status()
        payload = resp.json()
        return sorted(entry['id'] for entry in payload.get('data', []))
    except Exception as exc:
        print(f"Error fetching models: {exc}")
        return [
            "samura-deepseek-r1",
            "samura-gpt-4o",
            "groq-llama3-70b-8192",
        ]
29
 
30
def chat_completion(
    messages: List[dict],
    model: str,
) -> dict:
    """POST a chat-completion request to API_URL.

    Returns the parsed JSON response, or ``{"error": <message>}`` on any
    network / HTTP / decode failure so the caller never sees an exception.
    """
    payload = {
        "model": model,
        "messages": messages,
        "temperature": 0.7,
        "max_tokens": 2000,
        "stream": False,
    }
    try:
        resp = requests.post(
            API_URL,
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=30,
        )
        resp.raise_for_status()
        return resp.json()
    except Exception as exc:
        return {"error": str(exc)}
50
 
51
def format_message(text: str) -> str:
    """HTML-escape *text* and convert newlines to ``<br>`` for chat display."""
    escaped = escape(text)
    return "<br>".join(escaped.split("\n"))
54
 
55
def chat_interface(
    message: str,
    chat_history: List[Tuple[str, str]],
    model: str,
) -> Tuple[List[Tuple[str, str]], str]:
    """Gradio callback: append the user turn and the model's reply to history.

    Returns the (mutated) history plus an empty string, which clears the
    input textbox in the UI.
    """
    # Ignore blank submissions outright.
    if not message.strip():
        return chat_history, ""

    # Rebuild the API-format conversation from the displayed history,
    # undoing the <br> display formatting applied by format_message.
    conversation = []
    for user_turn, assistant_turn in chat_history:
        conversation.append({
            "role": "user",
            "content": user_turn.replace("<br>", "\n"),
        })
        if assistant_turn:
            conversation.append({
                "role": "assistant",
                "content": assistant_turn.replace("<br>", "\n"),
            })

    # Current message goes last.
    conversation.append({"role": "user", "content": message})

    result = chat_completion(messages=conversation, model=model)

    if "error" in result:
        reply = f"Error: {result['error']}"
    else:
        reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")

    # Mutate the history in place so Gradio's state stays consistent.
    chat_history.append((format_message(message), format_message(reply)))
    return chat_history, ""
94
 
95
def create_interface():
    """Assemble the Blocks UI: model picker, chatbot pane, and input row."""
    available_models = get_available_models()
    default_model = available_models[0] if available_models else "samura-deepseek-r1"

    with gr.Blocks(css=css, theme=gr.themes.Default()) as demo:
        gr.Markdown("# API Chat Interface")

        model_dropdown = gr.Dropdown(
            choices=available_models,
            value=default_model,
            label="Select Model",
        )

        chatbot = gr.Chatbot(
            elem_classes=["chatbot"],
            bubble_full_width=False,
            height=500,
        )

        with gr.Row():
            message = gr.Textbox(
                placeholder="Type your message here...",
                show_label=False,
                container=False,
                scale=7,
            )
            submit = gr.Button("Send", variant="primary", scale=1)

        gr.ClearButton([message, chatbot])

        # The Send button and the textbox's Enter key share one handler.
        for trigger in (submit.click, message.submit):
            trigger(
                fn=chat_interface,
                inputs=[message, chatbot, model_dropdown],
                outputs=[chatbot, message],
            )

    return demo
138
 
139
if __name__ == "__main__":
    # Build the UI and serve it.
    create_interface().launch()
 
1
  import gradio as gr
2
  import requests
3
+ import os
4
+ from duckduckgo_search import DDGS
5
+ from transformers import pipeline
6
 
7
# DuckDuckGo search client, shared by all requests.
ddgs = DDGS()

# Language model used to synthesize answers from search results.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # You can change this to any model you prefer
try:
    answerer = pipeline("text-generation", model=model_id, max_length=512)
except Exception:
    # Catch Exception, not a bare `except:` — a bare except would also
    # swallow SystemExit/KeyboardInterrupt during startup.
    # Fallback to a smaller model if the primary one fails.
    # NOTE: flan-t5 is an encoder-decoder (seq2seq) model, so it needs the
    # "text2text-generation" task; "text-generation" is for decoder-only
    # models and fails for T5 checkpoints.
    answerer = pipeline("text2text-generation", model="google/flan-t5-base", max_length=512)
17
 
18
def search_web(query, num_results=5):
    """Run a DuckDuckGo text search and return up to *num_results* hits.

    On failure, returns a single pseudo-result describing the error, so the
    caller always receives a list of result dicts.
    """
    try:
        return list(ddgs.text(query, max_results=num_results))
    except Exception as exc:
        return [{"title": f"Error searching: {str(exc)}", "body": "", "href": ""}]
 
 
 
 
 
25
 
26
def format_search_results(results):
    """Format search results into a readable Markdown block.

    Each result dict may carry "title", "body" and "href" keys; missing keys
    fall back to placeholder text so a partial result never crashes the
    formatter.
    """
    # Collect pieces in a list and join once — avoids the quadratic cost of
    # repeated string concatenation in the original loop.
    parts = ["### Search Results:\n\n"]
    for i, result in enumerate(results, 1):
        title = result.get("title", "No title")
        body = result.get("body", "No description")
        href = result.get("href", "No link")
        parts.append(f"**{i}. {title}**\n{body}\n[Link]({href})\n\n")
    return "".join(parts)
35
+
36
def generate_answer(query, search_results):
    """Synthesize an answer to *query* grounded in *search_results*.

    The formatted results are embedded into the prompt; on any model
    failure an error string is returned instead of raising.
    """
    context = format_search_results(search_results)
    prompt = f"""You are DeepSearch, a helpful AI assistant with web search capabilities.

Based on the following search results, please answer the user's question: "{query}"

{context}

Please provide a comprehensive answer using the information from the search results. If the search results don't contain relevant information, say so and provide your best answer based on your knowledge.

Answer:"""

    try:
        generations = answerer(prompt, max_length=800, do_sample=True, temperature=0.7)
        full_text = generations[0]['generated_text']
        # The model echoes the prompt; keep only what follows the final "Answer:".
        return full_text.split("Answer:")[-1].strip()
    except Exception as exc:
        return f"Error generating answer: {str(exc)}"
56
+
57
def deep_search(message, history):
    """Chat handler: search the web for *message*, then answer from the hits.

    *history* is supplied by gr.ChatInterface but is not used here — every
    turn triggers a fresh, stateless search.
    """
    hits = search_web(message)

    # search_web normally returns at least an error entry, but guard anyway.
    if not hits:
        return "I couldn't find any information on that topic. Please try a different question."

    return generate_answer(message, hits)
 
71
 
72
# Sample questions shown beneath the chat box.
_EXAMPLES = [
    "What is the capital of France?",
    "How does photosynthesis work?",
    "What are the latest developments in AI?",
    "Who won the last World Cup?",
    "What is the recipe for chocolate chip cookies?",
]

# Chat UI: every submitted message is routed through deep_search.
demo = gr.ChatInterface(
    fn=deep_search,
    title="DeepSearch Agent",
    description="Ask me anything! I'll search the web using DuckDuckGo and provide an answer based on the search results.",
    examples=_EXAMPLES,
    theme="soft",
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
if __name__ == "__main__":
    # Launch the Gradio server when run as a script.
    demo.launch()