Julian Vanecek committed on
Commit
5f6f8bc
·
1 Parent(s): 4a3b0d1

reverting frontend

Browse files
Files changed (1) hide show
  1. app.py +71 -149
app.py CHANGED
@@ -2,51 +2,11 @@ import os
2
  import json
3
  import gradio as gr
4
  import requests
5
- from dotenv import load_dotenv
6
  import gradio.components as gc
7
  import uuid
8
 
9
- # Load environment variables
10
- load_dotenv()
11
-
12
- # Get sensitive config from environment variables (set these in your .env file)
13
- ELASTICSEARCH_URL = os.getenv("ELASTICSEARCH_URL")
14
- ELASTICSEARCH_USER = os.getenv("ELASTICSEARCH_USER")
15
- ELASTICSEARCH_PASSWORD = os.getenv("ELASTICSEARCH_PASSWORD")
16
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
17
- AWS_LAMBDA_URL = os.getenv("AWS_LAMBDA_URL")
18
- GRADIO_AUTH_USERNAME = os.getenv("GRADIO_AUTH_USERNAME")
19
- GRADIO_AUTH_PASSWORD = os.getenv("GRADIO_AUTH_PASSWORD")
20
-
21
- # Check required env vars (skip Elasticsearch-related)
22
- missing_vars = []
23
- for var in ["OPENAI_API_KEY", "AWS_LAMBDA_URL", "GRADIO_AUTH_USERNAME", "GRADIO_AUTH_PASSWORD"]:
24
- if not os.getenv(var):
25
- missing_vars.append(var)
26
- if missing_vars:
27
- raise RuntimeError(f"Missing required environment variables: {', '.join(missing_vars)}. Please set them in your .env file.")
28
-
29
- # Initialize clients
30
- if ELASTICSEARCH_URL:
31
- es = None #Elasticsearch(
32
- #ELASTICSEARCH_URL,
33
- #basic_auth=(str(ELASTICSEARCH_USER), str(ELASTICSEARCH_PASSWORD))
34
- #)
35
- else:
36
- es = None
37
-
38
- # Initialize OpenAI
39
- #openai_client = OpenAI(api_key=OPENAI_API_KEY)
40
- def chat_completion(messages, model="gpt-3.5-turbo", temperature=0.1):
41
- #return openai_client.chat.completions.create(
42
- # model=model,
43
- # messages=messages,
44
- # temperature=temperature
45
- #)
46
- return None
47
-
48
  def process_faq(question, user_id="anonymous", model="claude-sonnet"):
49
- """Process FAQ by calling AWS Lambda function with streaming response"""
50
  try:
51
  # Determine the correct Lambda URL and model parameter based on selection
52
  if model.startswith("nova-"):
@@ -68,129 +28,91 @@ def process_faq(question, user_id="anonymous", model="claude-sonnet"):
68
  print(f"DEBUG: Sending to {lambda_url}")
69
  print(f"DEBUG: Payload: {json.dumps(payload, indent=2)}")
70
 
71
- # Make the API call
72
- response = requests.post(
73
  lambda_url,
74
  headers={"Content-Type": "application/json"},
75
- json=payload
76
- )
77
-
78
- if response.status_code != 200:
79
- return f"Error: Lambda function returned status code {response.status_code}"
80
-
81
- return response.text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
  except Exception as e:
84
  return f"Error processing FAQ: {str(e)}"
85
 
86
- def natural_to_query(natural_query):
87
- """Convert natural language to Elasticsearch query body"""
88
- try:
89
- prompt = f"""Convert the following natural language query into an Elasticsearch query body.\nThe query should be in JSON format and follow Elasticsearch query DSL syntax.\n\nNatural language query: {natural_query}\n\nReturn only the JSON query body, nothing else."""
90
- response = chat_completion([
91
- {"role": "system", "content": "You are an expert in Elasticsearch query DSL. Convert natural language to Elasticsearch queries."},
92
- {"role": "user", "content": prompt}
93
- ], model="gpt-3.5-turbo", temperature=0.1)
94
- # Extract and format the query
95
- if hasattr(response, 'choices'):
96
- # For OpenAI v1.x
97
- content = response.choices[0].message.content.strip()
98
- else:
99
- # For OpenAI v0.x
100
- content = response["choices"][0]["message"]["content"].strip()
101
- try:
102
- query_json = json.loads(content)
103
- return json.dumps(query_json, indent=2)
104
- except json.JSONDecodeError:
105
- return content
106
- except Exception as e:
107
- return f"Error generating query: {str(e)}"
108
-
109
- def execute_elasticsearch_query(query_body):
110
- """Execute the Elasticsearch query"""
111
- try:
112
- # Parse the query body
113
- query_json = json.loads(query_body)
114
-
115
- # Execute the query
116
- response = es.search(
117
- index="your_index_name", # Replace with your actual index name
118
- body=query_json
119
- )
120
-
121
- # Format the response
122
- return json.dumps(response, indent=2)
123
- except json.JSONDecodeError:
124
- return "Error: Invalid JSON query body"
125
- except Exception as e:
126
- return f"Error executing query: {str(e)}"
127
-
128
  # --- Gradio v4.x UI ---
129
  def faq_wrapper(question, user_id, model):
130
- return process_faq(question, user_id, model)
131
-
132
- def elasticsearch_generate(natural_input):
133
- return natural_to_query(natural_input)
134
-
135
- def elasticsearch_execute(query_body):
136
- return execute_elasticsearch_query(query_body)
137
 
138
  with gr.Blocks() as demo:
139
- gc.Markdown("# MCP Tools - Local Version")
140
- with gr.Tab(label="FAQ"): # type: ignore
141
- faq_input = gc.Textbox(label="Enter your question", lines=3)
142
- model_selector = gc.Dropdown(
143
- label="Select Model",
144
- choices=["nova-micro", "nova-lite", "nova-pro", "claude-haiku", "claude-sonnet"],
145
- value="claude-sonnet",
146
- interactive=True
147
- )
148
- # Generate random user ID for this session
149
- session_user_id = str(uuid.uuid4())[:8]
150
- faq_button = gc.Button("Process")
151
- faq_output = gc.Textbox(label="Response", lines=10)
152
- with gr.Row(): # type: ignore
153
- thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
154
- feedback_msg = gc.Markdown(visible=False)
155
-
156
- def report_bad_response():
157
- return gr.update(value="Bad response reported. Thank you for your feedback.", visible=True), gr.update(interactive=False)
158
-
159
- thumbs_down.click(report_bad_response, outputs=[feedback_msg, thumbs_down])
160
-
161
- # Re-enable report button and clear feedback when a new FAQ is processed
162
- def reset_feedback(*args):
163
- return gr.update(interactive=True), gr.update(value="", visible=False)
164
- faq_button.click(reset_feedback, outputs=[thumbs_down, feedback_msg], preprocess=False)
165
- faq_button.click(lambda q, m: faq_wrapper(q, session_user_id, m), inputs=[faq_input, model_selector], outputs=faq_output)
166
- with gr.Tab(label="Elasticsearch"): # type: ignore
167
- gc.Markdown("### Step 1: Natural Language to Query")
168
- natural_input = gc.Textbox(label="Describe what you want to search for", lines=3, placeholder="Example: Find all documents containing 'machine learning' in the title")
169
- generate_button = gc.Button("Generate Query")
170
- query_output = gc.Textbox(label="Generated Query Body", lines=10, placeholder="The generated Elasticsearch query will appear here")
171
- generate_button.click(elasticsearch_generate, inputs=natural_input, outputs=query_output)
172
- gc.Markdown("### Step 2: Execute Query")
173
- gc.Markdown("You can modify the query above if needed, then click Execute")
174
- execute_button = gc.Button("Execute Query")
175
- result_output = gc.Textbox(label="Query Results", lines=10, placeholder="The query results will appear here")
176
- execute_button.click(elasticsearch_execute, inputs=query_output, outputs=result_output)
177
 
178
  if __name__ == "__main__":
179
  # Check if running in Hugging Face Spaces
180
  is_spaces = os.getenv("SPACE_ID") is not None
181
 
182
- # Get auth credentials from environment variables
183
- auth_username = GRADIO_AUTH_USERNAME
184
- auth_password = GRADIO_AUTH_PASSWORD
185
-
186
  # Configure launch parameters
187
- launch_params = {
188
- "server_name": "0.0.0.0",
189
- "server_port": int(os.getenv("PORT", 7860)),
190
- "share": not is_spaces, # Only enable sharing when not in Spaces
191
- "auth": (auth_username, auth_password), # Add basic auth
192
- "auth_message": "Please enter your credentials to access the application."
193
- }
 
 
 
 
 
 
 
194
 
195
  # Launch the app
196
- demo.launch(**launch_params)
 
2
  import json
3
  import gradio as gr
4
  import requests
 
5
  import gradio.components as gc
6
  import uuid
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  def process_faq(question, user_id="anonymous", model="claude-sonnet"):
9
+ """Process FAQ by calling AWS Lambda function"""
10
  try:
11
  # Determine the correct Lambda URL and model parameter based on selection
12
  if model.startswith("nova-"):
 
28
  print(f"DEBUG: Sending to {lambda_url}")
29
  print(f"DEBUG: Payload: {json.dumps(payload, indent=2)}")
30
 
31
+ # Make the API call with streaming
32
+ with requests.post(
33
  lambda_url,
34
  headers={"Content-Type": "application/json"},
35
+ json=payload,
36
+ stream=True
37
+ ) as response:
38
+ if response.status_code != 200:
39
+ return f"Error: Lambda function returned status code {response.status_code}"
40
+
41
+ # Process the response (Lambda returns complete response as plain text)
42
+ full_response = ""
43
+ for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
44
+ if chunk:
45
+ full_response += chunk
46
+
47
+ # Simulate streaming by yielding progressively
48
+ if full_response:
49
+ words = full_response.split()
50
+ current_text = ""
51
+ for i, word in enumerate(words):
52
+ current_text += word + " "
53
+ if i % 2 == 0 or i == len(words) - 1: # Yield every 2 words or at the end
54
+ yield current_text.strip()
55
+
56
+ return full_response
57
 
58
  except Exception as e:
59
  return f"Error processing FAQ: {str(e)}"
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  # --- Gradio v4.x UI ---
62
def faq_wrapper(question, user_id, model):
    """Adapt the process_faq generator to a plain-return Gradio handler.

    process_faq yields progressively longer partial responses; this click
    handler is registered as a non-generator callback, so we drain the
    generator and hand back only the final (complete) chunk.
    """
    final_chunk = ""
    chunks = process_faq(question, user_id, model)
    for partial in chunks:
        final_chunk = partial
    return final_chunk
 
 
68
 
69
# Build the Gradio UI: a single FAQ chat panel backed by the process_faq
# Lambda caller, plus a lightweight "report bad response" feedback control.
with gr.Blocks() as demo:
    gc.Markdown("# MCP Chatbot")
    # Question input and model picker; choices map to backend model ids.
    faq_input = gc.Textbox(label="Enter your question", lines=3)
    model_selector = gc.Dropdown(
        label="Select Model",
        choices=["nova-micro", "nova-pro", "claude-haiku", "claude-sonnet"],
        value="claude-sonnet",
        interactive=True
    )
    # Generate random user ID for this session (short uuid4 prefix; used to
    # tag requests sent to the backend, not for authentication).
    session_user_id = str(uuid.uuid4())[:8]
    faq_button = gc.Button("Process")
    faq_output = gc.Textbox(label="Response", lines=10)
    with gr.Row(): # type: ignore
        thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
        # Hidden until the user reports a bad response.
        feedback_msg = gc.Markdown(visible=False)

    def report_bad_response():
        # Show the acknowledgement and disable the button so the same
        # response cannot be reported twice.
        return gr.update(value="Bad response reported. Thank you for your feedback.", visible=True), gr.update(interactive=False)

    thumbs_down.click(report_bad_response, outputs=[feedback_msg, thumbs_down])

    # Re-enable report button and clear feedback when a new FAQ is processed
    def reset_feedback(*args):
        return gr.update(interactive=True), gr.update(value="", visible=False)
    faq_button.click(reset_feedback, outputs=[thumbs_down, feedback_msg], preprocess=False)
    # NOTE(review): both click handlers are registered on faq_button; Gradio
    # runs them as separate events, so feedback is reset alongside processing.
    faq_button.click(lambda q, m: faq_wrapper(q, session_user_id, m), inputs=[faq_input, model_selector], outputs=faq_output)
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
if __name__ == "__main__":
    # Hugging Face Spaces injects SPACE_ID; its presence tells us which
    # environment we are running in.
    running_in_spaces = os.getenv("SPACE_ID") is not None

    # Launch the app. A public share link is only wanted for local
    # development — Spaces already serves the app — so sharing is simply the
    # negation of the Spaces check. No auth is configured in either case.
    demo.launch(
        server_name="0.0.0.0",
        server_port=int(os.getenv("PORT", 7860)),
        share=not running_in_spaces,
    )