Julian Vanecek committed on
Commit
23718e3
·
1 Parent(s): 05ab747

reverting frontend

Browse files
Files changed (1) hide show
  1. app.py +142 -44
app.py CHANGED
@@ -2,11 +2,45 @@ import os
2
  import json
3
  import gradio as gr
4
  import requests
 
5
  import gradio.components as gc
6
  import uuid
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  def process_faq(question, user_id="anonymous", model="claude-sonnet"):
9
- """Process FAQ by calling AWS Lambda function"""
10
  try:
11
  # Determine the correct Lambda URL and model parameter based on selection
12
  if model.startswith("nova-"):
@@ -38,26 +72,69 @@ def process_faq(question, user_id="anonymous", model="claude-sonnet"):
38
  if response.status_code != 200:
39
  return f"Error: Lambda function returned status code {response.status_code}"
40
 
41
- # Process the response (Lambda returns complete response as plain text)
42
  full_response = ""
43
  for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
44
  if chunk:
45
- full_response += chunk
46
-
47
- # Simulate streaming by yielding progressively
48
- if full_response:
49
- words = full_response.split()
50
- current_text = ""
51
- for i, word in enumerate(words):
52
- current_text += word + " "
53
- if i % 2 == 0 or i == len(words) - 1: # Yield every 2 words or at the end
54
- yield current_text.strip()
 
55
 
56
  return full_response
57
 
58
  except Exception as e:
59
  return f"Error processing FAQ: {str(e)}"
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  # --- Gradio v4.x UI ---
62
  def faq_wrapper(question, user_id, model):
63
  # Gradio expects a non-generator for Interface
@@ -66,53 +143,74 @@ def faq_wrapper(question, user_id, model):
66
  result = chunk
67
  return result
68
 
 
 
 
 
 
 
69
  with gr.Blocks() as demo:
70
- gc.Markdown("# MCP Chatbot")
71
- faq_input = gc.Textbox(label="Enter your question", lines=3)
72
- model_selector = gc.Dropdown(
73
- label="Select Model",
74
- choices=["nova-micro", "nova-pro", "claude-haiku", "claude-sonnet"],
75
- value="claude-sonnet",
76
- interactive=True
77
- )
78
- # Generate random user ID for this session
79
- session_user_id = str(uuid.uuid4())[:8]
80
- faq_button = gc.Button("Process")
81
- faq_output = gc.Textbox(label="Response", lines=10)
82
- with gr.Row(): # type: ignore
83
- thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
84
- feedback_msg = gc.Markdown(visible=False)
85
-
86
- def report_bad_response():
87
- return gr.update(value="Bad response reported. Thank you for your feedback.", visible=True), gr.update(interactive=False)
88
-
89
- thumbs_down.click(report_bad_response, outputs=[feedback_msg, thumbs_down])
90
-
91
- # Re-enable report button and clear feedback when a new FAQ is processed
92
- def reset_feedback(*args):
93
- return gr.update(interactive=True), gr.update(value="", visible=False)
94
- faq_button.click(reset_feedback, outputs=[thumbs_down, feedback_msg], preprocess=False)
95
- faq_button.click(lambda q, m: faq_wrapper(q, session_user_id, m), inputs=[faq_input, model_selector], outputs=faq_output)
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
  if __name__ == "__main__":
98
  # Check if running in Hugging Face Spaces
99
  is_spaces = os.getenv("SPACE_ID") is not None
100
 
101
- # Configure launch parameters
102
  if is_spaces:
103
- # No auth in Hugging Face Spaces
104
  launch_params = {
105
  "server_name": "0.0.0.0",
106
  "server_port": int(os.getenv("PORT", 7860)),
107
  "share": False
108
  }
109
  else:
110
- # Local development - no auth required
 
 
111
  launch_params = {
112
- "server_name": "0.0.0.0",
113
  "server_port": int(os.getenv("PORT", 7860)),
114
- "share": True
 
 
115
  }
116
 
117
  # Launch the app
118
- demo.launch(**launch_params)
 
2
  import json
3
  import gradio as gr
4
  import requests
5
+ from dotenv import load_dotenv
6
  import gradio.components as gc
7
  import uuid
8
 
9
+ # Load environment variables
10
+ load_dotenv()
11
+
12
+ # Get sensitive config from environment variables (set these in your .env file)
13
+ #ELASTICSEARCH_URL = os.getenv("ELASTICSEARCH_URL")
14
+ #ELASTICSEARCH_USER = os.getenv("ELASTICSEARCH_USER")
15
+ #ELASTICSEARCH_PASSWORD = os.getenv("ELASTICSEARCH_PASSWORD")
16
+ #OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
17
+ #AWS_LAMBDA_URL = os.getenv("AWS_LAMBDA_URL")
18
+ GRADIO_AUTH_USERNAME = os.getenv("GRADIO_AUTH_USERNAME")
19
+ GRADIO_AUTH_PASSWORD = os.getenv("GRADIO_AUTH_PASSWORD")
20
+
21
+ # Check required env vars for local development only
22
+ if not os.getenv("SPACE_ID"):
23
+ missing_vars = []
24
+ for var in ["GRADIO_AUTH_USERNAME", "GRADIO_AUTH_PASSWORD"]:
25
+ if not os.getenv(var):
26
+ missing_vars.append(var)
27
+ if missing_vars:
28
+ print(f"Warning: Missing auth environment variables for local development: {', '.join(missing_vars)}")
29
+
30
+ es = None
31
+
32
+ # Initialize OpenAI
33
+ #openai_client = OpenAI(api_key=OPENAI_API_KEY)
34
+ def chat_completion(messages, model="gpt-3.5-turbo", temperature=0.1):
35
+ #return openai_client.chat.completions.create(
36
+ # model=model,
37
+ # messages=messages,
38
+ # temperature=temperature
39
+ #)
40
+ return None
41
+
42
  def process_faq(question, user_id="anonymous", model="claude-sonnet"):
43
+ """Process FAQ by calling AWS Lambda function with streaming response"""
44
  try:
45
  # Determine the correct Lambda URL and model parameter based on selection
46
  if model.startswith("nova-"):
 
72
  if response.status_code != 200:
73
  return f"Error: Lambda function returned status code {response.status_code}"
74
 
75
+ # Process the streaming response
76
  full_response = ""
77
  for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
78
  if chunk:
79
+ try:
80
+ # Try to parse the chunk as JSON
81
+ chunk_data = json.loads(chunk)
82
+ if "response" in chunk_data:
83
+ chunk_text = chunk_data["response"]
84
+ full_response += chunk_text
85
+ yield full_response
86
+ except json.JSONDecodeError:
87
+ # If not JSON, treat as plain text
88
+ full_response += chunk
89
+ yield full_response
90
 
91
  return full_response
92
 
93
  except Exception as e:
94
  return f"Error processing FAQ: {str(e)}"
95
 
96
+ def natural_to_query(natural_query):
97
+ """Convert natural language to Elasticsearch query body"""
98
+ try:
99
+ prompt = f"""Convert the following natural language query into an Elasticsearch query body.\nThe query should be in JSON format and follow Elasticsearch query DSL syntax.\n\nNatural language query: {natural_query}\n\nReturn only the JSON query body, nothing else."""
100
+ response = chat_completion([
101
+ {"role": "system", "content": "You are an expert in Elasticsearch query DSL. Convert natural language to Elasticsearch queries."},
102
+ {"role": "user", "content": prompt}
103
+ ], model="gpt-3.5-turbo", temperature=0.1)
104
+ # Extract and format the query
105
+ if hasattr(response, 'choices'):
106
+ # For OpenAI v1.x
107
+ content = response.choices[0].message.content.strip()
108
+ else:
109
+ # For OpenAI v0.x
110
+ content = response["choices"][0]["message"]["content"].strip()
111
+ try:
112
+ query_json = json.loads(content)
113
+ return json.dumps(query_json, indent=2)
114
+ except json.JSONDecodeError:
115
+ return content
116
+ except Exception as e:
117
+ return f"Error generating query: {str(e)}"
118
+
119
+ def execute_elasticsearch_query(query_body):
120
+ """Execute the Elasticsearch query"""
121
+ try:
122
+ # Parse the query body
123
+ query_json = json.loads(query_body)
124
+
125
+ # Execute the query
126
+ response = es.search(
127
+ index="your_index_name", # Replace with your actual index name
128
+ body=query_json
129
+ )
130
+
131
+ # Format the response
132
+ return json.dumps(response, indent=2)
133
+ except json.JSONDecodeError:
134
+ return "Error: Invalid JSON query body"
135
+ except Exception as e:
136
+ return f"Error executing query: {str(e)}"
137
+
138
  # --- Gradio v4.x UI ---
139
  def faq_wrapper(question, user_id, model):
140
  # Gradio expects a non-generator for Interface
 
143
  result = chunk
144
  return result
145
 
146
+ def elasticsearch_generate(natural_input):
147
+ return natural_to_query(natural_input)
148
+
149
+ def elasticsearch_execute(query_body):
150
+ return execute_elasticsearch_query(query_body)
151
+
152
  with gr.Blocks() as demo:
153
+ gc.Markdown("# MCP Tools - Local Version")
154
+ with gr.Tab(label="FAQ"): # type: ignore
155
+ faq_input = gc.Textbox(label="Enter your question", lines=3)
156
+ model_selector = gc.Dropdown(
157
+ label="Select Model",
158
+ choices=["nova-micro", "nova-pro", "claude-haiku", "claude-sonnet"],
159
+ value="claude-sonnet",
160
+ interactive=True
161
+ )
162
+ # Generate random user ID for this session
163
+ session_user_id = str(uuid.uuid4())[:8]
164
+ faq_button = gc.Button("Process")
165
+ faq_output = gc.Textbox(label="Response", lines=10)
166
+ with gr.Row(): # type: ignore
167
+ thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
168
+ feedback_msg = gc.Markdown(visible=False)
169
+
170
+ def report_bad_response():
171
+ return gr.update(value="Bad response reported. Thank you for your feedback.", visible=True), gr.update(interactive=False)
172
+
173
+ thumbs_down.click(report_bad_response, outputs=[feedback_msg, thumbs_down])
174
+
175
+ # Re-enable report button and clear feedback when a new FAQ is processed
176
+ def reset_feedback(*args):
177
+ return gr.update(interactive=True), gr.update(value="", visible=False)
178
+ faq_button.click(reset_feedback, outputs=[thumbs_down, feedback_msg], preprocess=False)
179
+ faq_button.click(lambda q, m: faq_wrapper(q, session_user_id, m), inputs=[faq_input, model_selector], outputs=faq_output)
180
+ with gr.Tab(label="Elasticsearch"): # type: ignore
181
+ gc.Markdown("### Step 1: Natural Language to Query")
182
+ natural_input = gc.Textbox(label="Describe what you want to search for", lines=3, placeholder="Example: Find all documents containing 'machine learning' in the title")
183
+ generate_button = gc.Button("Generate Query")
184
+ query_output = gc.Textbox(label="Generated Query Body", lines=10, placeholder="The generated Elasticsearch query will appear here")
185
+ generate_button.click(elasticsearch_generate, inputs=natural_input, outputs=query_output)
186
+ gc.Markdown("### Step 2: Execute Query")
187
+ gc.Markdown("You can modify the query above if needed, then click Execute")
188
+ execute_button = gc.Button("Execute Query")
189
+ result_output = gc.Textbox(label="Query Results", lines=10, placeholder="The query results will appear here")
190
+ execute_button.click(elasticsearch_execute, inputs=query_output, outputs=result_output)
191
 
192
  if __name__ == "__main__":
193
  # Check if running in Hugging Face Spaces
194
  is_spaces = os.getenv("SPACE_ID") is not None
195
 
196
+ # Configure launch parameters for Spaces
197
  if is_spaces:
 
198
  launch_params = {
199
  "server_name": "0.0.0.0",
200
  "server_port": int(os.getenv("PORT", 7860)),
201
  "share": False
202
  }
203
  else:
204
+ # Local development with auth
205
+ auth_username = GRADIO_AUTH_USERNAME
206
+ auth_password = GRADIO_AUTH_PASSWORD
207
  launch_params = {
208
+ "server_name": "0.0.0.0",
209
  "server_port": int(os.getenv("PORT", 7860)),
210
+ "share": True,
211
+ "auth": (auth_username, auth_password),
212
+ "auth_message": "Please enter your credentials to access the application."
213
  }
214
 
215
  # Launch the app
216
+ demo.launch(**launch_params)