Julian Vanecek committed on
Commit
b387352
·
1 Parent(s): 5420a06

reverting to non-streaming frontend

Browse files
Files changed (2) hide show
  1. app.py +43 -1
  2. app_stream.py +257 -0
app.py CHANGED
@@ -1,3 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import json
3
  import gradio as gr
@@ -45,7 +87,6 @@ def process_faq(question, user_id="anonymous", model="claude-sonnet"):
45
  # Determine the correct Lambda URL and model parameter based on selection
46
  if model.startswith("nova-"):
47
  lambda_url = "https://tz2ttiieoc5z4aq6pskg24zu740bvqup.lambda-url.us-west-2.on.aws/"
48
- # lambda_url = "https://l2fhyrulj6yjzonazngpxdiswm0mgfvp.lambda-url.us-west-2.on.aws/"
49
  model_param = model.replace("nova-", "") # Extract micro/lite/pro
50
  elif model.startswith("claude-"):
51
  lambda_url = "https://myzano2bfze54q6yqp32wwpj6q0ixpmy.lambda-url.us-west-2.on.aws/"
@@ -255,3 +296,4 @@ if __name__ == "__main__":
255
 
256
  # Launch the app
257
  demo.launch(**launch_params)
 
 
1
+ Hugging Face's logo
2
+ Hugging Face
3
+ Models
4
+ Datasets
5
+ Spaces
6
+ Community
7
+ Docs
8
+ Enterprise
9
+ Pricing
10
+
11
+
12
+
13
+ Spaces:
14
+
15
+ bitsinthesky
16
+ /
17
+ chatbot
18
+
19
+
20
+ like
21
+ 0
22
+
23
+ Logs
24
+ App
25
+ Files
26
+ Community
27
+ Settings
28
+ chatbot
29
+ /
30
+ app.py
31
+
32
+ Julian Vanecek
33
+ reverting frontend
34
+ 755b156
35
+ 3 days ago
36
+ raw
37
+
38
+ Copy download link
39
+ history
40
+ blame
41
+
42
+ 10.7 kB
43
  import os
44
  import json
45
  import gradio as gr
 
87
  # Determine the correct Lambda URL and model parameter based on selection
88
  if model.startswith("nova-"):
89
  lambda_url = "https://tz2ttiieoc5z4aq6pskg24zu740bvqup.lambda-url.us-west-2.on.aws/"
 
90
  model_param = model.replace("nova-", "") # Extract micro/lite/pro
91
  elif model.startswith("claude-"):
92
  lambda_url = "https://myzano2bfze54q6yqp32wwpj6q0ixpmy.lambda-url.us-west-2.on.aws/"
 
296
 
297
  # Launch the app
298
  demo.launch(**launch_params)
299
+
app_stream.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import gradio as gr
4
+ import requests
5
+ from dotenv import load_dotenv
6
+ import gradio.components as gc
7
+ import uuid
8
+
9
# Load environment variables from a local .env file (no-op if the file is absent).
load_dotenv()

# Get sensitive config from environment variables (set these in your .env file)
#ELASTICSEARCH_URL = os.getenv("ELASTICSEARCH_URL")
#ELASTICSEARCH_USER = os.getenv("ELASTICSEARCH_USER")
#ELASTICSEARCH_PASSWORD = os.getenv("ELASTICSEARCH_PASSWORD")
#OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
#AWS_LAMBDA_URL = os.getenv("AWS_LAMBDA_URL")
GRADIO_AUTH_USERNAME = os.getenv("GRADIO_AUTH_USERNAME")
GRADIO_AUTH_PASSWORD = os.getenv("GRADIO_AUTH_PASSWORD")

# Check required env vars for local development only.
# SPACE_ID is set automatically on Hugging Face Spaces, where auth is not used.
if not os.getenv("SPACE_ID"):
    missing_vars = [
        var
        for var in ("GRADIO_AUTH_USERNAME", "GRADIO_AUTH_PASSWORD")
        if not os.getenv(var)
    ]
    if missing_vars:
        print(f"Warning: Missing auth environment variables for local development: {', '.join(missing_vars)}")

# Elasticsearch client placeholder — the ES integration is currently disabled,
# so execute_elasticsearch_query() will report an error until this is wired up.
es = None
31
+
32
# Initialize OpenAI
#openai_client = OpenAI(api_key=OPENAI_API_KEY)
def chat_completion(messages, model="gpt-3.5-turbo", temperature=0.1):
    """Stubbed-out OpenAI chat-completion call.

    The OpenAI client is currently disabled (see the commented-out code),
    so this always returns ``None``. Callers such as ``natural_to_query``
    must therefore handle a missing response.

    Args:
        messages: chat messages in OpenAI format (list of role/content dicts).
        model: model name that would be passed to the API (unused while stubbed).
        temperature: sampling temperature (unused while stubbed).

    Returns:
        None, always, until the OpenAI integration is re-enabled.
    """
    #return openai_client.chat.completions.create(
    #    model=model,
    #    messages=messages,
    #    temperature=temperature
    #)
    return None
41
+
42
def process_faq(question, user_id="anonymous", model="claude-sonnet"):
    """Call the AWS Lambda FAQ backend and stream back the response.

    Args:
        question: the user's question; whitespace is stripped before sending.
        user_id: opaque identifier forwarded to the backend.
        model: dropdown value such as "nova-micro" or "claude-sonnet"; the
            prefix selects the Lambda URL, the suffix is sent as the model
            parameter.

    Yields:
        The accumulated response text after each received chunk, so the
        frontend can render partial output.

    Note (bug fix): this function is a generator, so the original
    ``return "Error: ..."`` statements were swallowed by StopIteration and
    callers iterating the generator never saw any error text. Errors are
    now *yielded* before returning.
    """
    try:
        # Determine the correct Lambda URL and model parameter based on selection
        if model.startswith("nova-"):
            lambda_url = "https://tz2ttiieoc5z4aq6pskg24zu740bvqup.lambda-url.us-west-2.on.aws/"
            # lambda_url = "https://l2fhyrulj6yjzonazngpxdiswm0mgfvp.lambda-url.us-west-2.on.aws/"
            model_param = model.replace("nova-", "")  # Extract micro/lite/pro
        elif model.startswith("claude-"):
            lambda_url = "https://myzano2bfze54q6yqp32wwpj6q0ixpmy.lambda-url.us-west-2.on.aws/"
            model_param = model.replace("claude-", "")  # Extract haiku/sonnet
        else:
            yield "Error: Invalid model selection"
            return

        # Prepare the request payload
        payload = {
            "message": question.strip(),
            "user_id": user_id,
            "model": model_param
        }

        print(f"DEBUG: Sending to {lambda_url}")
        print(f"DEBUG: Payload: {json.dumps(payload, indent=2)}")

        # Make the API call with streaming
        with requests.post(
            lambda_url,
            headers={"Content-Type": "application/json"},
            json=payload,
            stream=True
        ) as response:
            if response.status_code != 200:
                yield f"Error: Lambda function returned status code {response.status_code}"
                return

            # Process the streaming response
            full_response = ""
            for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
                if chunk:
                    try:
                        # Try to parse the chunk as JSON
                        chunk_data = json.loads(chunk)
                        if "response" in chunk_data:
                            full_response += chunk_data["response"]
                            yield full_response
                    except json.JSONDecodeError:
                        # If not JSON, treat as plain text
                        full_response += chunk
                        yield full_response

    except Exception as e:
        yield f"Error processing FAQ: {str(e)}"
96
+
97
def natural_to_query(natural_query):
    """Translate a natural-language request into an Elasticsearch query body.

    Returns pretty-printed JSON when the model reply parses as JSON, the raw
    reply text otherwise, or an "Error generating query: ..." string on any
    failure (including the currently-stubbed chat_completion returning None).
    """
    try:
        prompt = f"""Convert the following natural language query into an Elasticsearch query body.\nThe query should be in JSON format and follow Elasticsearch query DSL syntax.\n\nNatural language query: {natural_query}\n\nReturn only the JSON query body, nothing else."""
        messages = [
            {"role": "system", "content": "You are an expert in Elasticsearch query DSL. Convert natural language to Elasticsearch queries."},
            {"role": "user", "content": prompt},
        ]
        reply = chat_completion(messages, model="gpt-3.5-turbo", temperature=0.1)

        # OpenAI v1.x responses expose .choices; v0.x responses are dicts.
        if hasattr(reply, 'choices'):
            text = reply.choices[0].message.content.strip()
        else:
            text = reply["choices"][0]["message"]["content"].strip()

        # Pretty-print when the reply is valid JSON, otherwise pass it through.
        try:
            return json.dumps(json.loads(text), indent=2)
        except json.JSONDecodeError:
            return text
    except Exception as e:
        return f"Error generating query: {str(e)}"
119
+
120
def execute_elasticsearch_query(query_body):
    """Run *query_body* (a JSON string) against Elasticsearch.

    Returns the search response as pretty-printed JSON, or an error string
    when the body is not valid JSON or the search itself fails (the module's
    `es` client is currently None, so execution always reports an error).
    """
    # Guard clause: reject bodies that are not valid JSON up front.
    try:
        parsed_query = json.loads(query_body)
    except json.JSONDecodeError:
        return "Error: Invalid JSON query body"

    try:
        # Execute the query and format the raw response for display.
        search_response = es.search(
            index="your_index_name",  # Replace with your actual index name
            body=parsed_query
        )
        return json.dumps(search_response, indent=2)
    except Exception as e:
        return f"Error executing query: {str(e)}"
138
+
139
# --- Gradio v4.x UI ---
def faq_wrapper(question, user_id, model):
    """Drain the process_faq generator and return its final text.

    Gradio's non-streaming frontend needs a single value, so only the last
    chunk is kept; literal backslash-n sequences are turned into real
    newlines and any surrounding quotes are stripped.
    """
    final_text = ""
    for partial in process_faq(question, user_id, model):
        final_text = partial
    final_text = final_text.replace('\\n', '\n')
    return final_text.strip('"\'')
150
+
151
def elasticsearch_generate(natural_input):
    """UI callback: delegate natural-language → Elasticsearch query translation."""
    return natural_to_query(natural_input)
153
+
154
def elasticsearch_execute(query_body):
    """UI callback: delegate query execution to execute_elasticsearch_query."""
    return execute_elasticsearch_query(query_body)
156
+
157
# Build the Gradio UI. NOTE(review): components are created through both
# `gr` and `gc` (gradio.components); they resolve to the same classes, the
# mix appears cosmetic — consider unifying on `gr.*`.
with gr.Blocks() as demo:
    gc.Markdown("# MCP Tools - Local Version")
    with gr.Tab(label="FAQ"):  # type: ignore
        faq_input = gc.Textbox(label="Enter your question", lines=3)
        model_selector = gc.Dropdown(
            label="Select Model",
            choices=["nova-micro", "nova-pro", "claude-haiku", "claude-sonnet"],
            value="claude-sonnet",
            interactive=True
        )
        # Generate random user ID for this session (8 hex chars is enough to
        # distinguish sessions; not a security identifier).
        session_user_id = str(uuid.uuid4())[:8]
        faq_button = gc.Button("Process")
        # Loading animation HTML: a pure-CSS spinner shown while the Lambda
        # call is in flight.
        loading_html = """
        <div style="display: flex; justify-content: center; align-items: center; min-height: 100px; border: 1px solid #ddd; border-radius: 8px; background-color: #f9f9f9;">
            <div style="display: inline-block; width: 40px; height: 40px; border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; animation: spin 1s linear infinite;"></div>
            <style>
                @keyframes spin {
                    0% { transform: rotate(0deg); }
                    100% { transform: rotate(360deg); }
                }
            </style>
        </div>
        """

        # Empty bounding box HTML shown before the first question is asked.
        empty_box_html = """
        <div style="min-height: 100px; border: 1px solid #ddd; border-radius: 8px; background-color: #f9f9f9; padding: 20px;">
        </div>
        """

        faq_output = gc.HTML(label="Response", value=empty_box_html)
        with gr.Row():  # type: ignore
            thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
            feedback_msg = gc.Markdown(visible=False)

        def report_bad_response():
            # Show a confirmation message and disable the button so feedback
            # can only be submitted once. NOTE(review): the report is not
            # persisted anywhere visible in this file — confirm intent.
            return gr.update(value="Bad response reported. Thank you for your feedback.", visible=True), gr.update(interactive=False)

        thumbs_down.click(report_bad_response, outputs=[feedback_msg, thumbs_down])

        # Combined generator callback: first yield swaps in the spinner,
        # second yield replaces it with the formatted answer.
        def process_with_loading(question, model):
            # Show loading spinner
            yield gr.update(value=loading_html)

            # Process the question (blocking call to the Lambda backend)
            result = faq_wrapper(question, session_user_id, model)

            # Format response in bounding box and show result
            response_html = f"""
            <div style="min-height: 100px; border: 1px solid #ddd; border-radius: 8px; background-color: #ffffff; padding: 20px;">
                <div style="white-space: pre-wrap; line-height: 1.5;">{result}</div>
            </div>
            """
            yield gr.update(value=response_html)

        faq_button.click(
            process_with_loading,
            inputs=[faq_input, model_selector],
            outputs=[faq_output],
            show_progress=False
        )
    with gr.Tab(label="Elasticsearch"):  # type: ignore
        gc.Markdown("### Step 1: Natural Language to Query")
        natural_input = gc.Textbox(label="Describe what you want to search for", lines=3, placeholder="Example: Find all documents containing 'machine learning' in the title")
        generate_button = gc.Button("Generate Query")
        query_output = gc.Textbox(label="Generated Query Body", lines=10, placeholder="The generated Elasticsearch query will appear here")
        generate_button.click(elasticsearch_generate, inputs=natural_input, outputs=query_output)
        gc.Markdown("### Step 2: Execute Query")
        gc.Markdown("You can modify the query above if needed, then click Execute")
        execute_button = gc.Button("Execute Query")
        result_output = gc.Textbox(label="Query Results", lines=10, placeholder="The query results will appear here")
        execute_button.click(elasticsearch_execute, inputs=query_output, outputs=result_output)
232
+
233
if __name__ == "__main__":
    # Check if running in Hugging Face Spaces (SPACE_ID is set by the platform)
    is_spaces = os.getenv("SPACE_ID") is not None

    # Configure launch parameters for Spaces
    if is_spaces:
        launch_params = {
            "server_name": "0.0.0.0",
            "server_port": int(os.getenv("PORT", 7860)),
            "share": False
        }
    else:
        # Local development: share a public link and protect it with auth.
        launch_params = {
            "server_name": "0.0.0.0",
            "server_port": int(os.getenv("PORT", 7860)),
            "share": True,
        }
        # BUG FIX: previously auth=(None, None) was passed when the env vars
        # were unset (the startup warning already flags this case), which
        # breaks demo.launch(). Only enable auth when both credentials exist.
        if GRADIO_AUTH_USERNAME and GRADIO_AUTH_PASSWORD:
            launch_params["auth"] = (GRADIO_AUTH_USERNAME, GRADIO_AUTH_PASSWORD)
            launch_params["auth_message"] = "Please enter your credentials to access the application."

    # Launch the app
    demo.launch(**launch_params)