Julian Vanecek committed on
Commit
86ebbb9
·
1 Parent(s): 23718e3

reverting frontend

Browse files
Files changed (3) hide show
  1. .DS_Store +0 -0
  2. app.py +3 -1
  3. app_broken.py +0 -216
.DS_Store ADDED
Binary file (6.15 kB). View file
 
app.py CHANGED
@@ -141,6 +141,8 @@ def faq_wrapper(question, user_id, model):
141
  result = ""
142
  for chunk in process_faq(question, user_id, model):
143
  result = chunk
 
 
144
  return result
145
 
146
  def elasticsearch_generate(natural_input):
@@ -162,7 +164,7 @@ with gr.Blocks() as demo:
162
  # Generate random user ID for this session
163
  session_user_id = str(uuid.uuid4())[:8]
164
  faq_button = gc.Button("Process")
165
- faq_output = gc.Textbox(label="Response", lines=10)
166
  with gr.Row(): # type: ignore
167
  thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
168
  feedback_msg = gc.Markdown(visible=False)
 
141
  result = ""
142
  for chunk in process_faq(question, user_id, model):
143
  result = chunk
144
+ # Convert literal \n characters to actual newlines
145
+ result = result.replace('\\n', '\n')
146
  return result
147
 
148
  def elasticsearch_generate(natural_input):
 
164
  # Generate random user ID for this session
165
  session_user_id = str(uuid.uuid4())[:8]
166
  faq_button = gc.Button("Process")
167
+ faq_output = gc.Markdown(label="Response")
168
  with gr.Row(): # type: ignore
169
  thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
170
  feedback_msg = gc.Markdown(visible=False)
app_broken.py DELETED
@@ -1,216 +0,0 @@
1
- import os
2
- import json
3
- import gradio as gr
4
- import requests
5
- from dotenv import load_dotenv
6
- import gradio.components as gc
7
- import uuid
8
-
9
- # Load environment variables
10
- load_dotenv()
11
-
12
- # Get sensitive config from environment variables (set these in your .env file)
13
- #ELASTICSEARCH_URL = os.getenv("ELASTICSEARCH_URL")
14
- #ELASTICSEARCH_USER = os.getenv("ELASTICSEARCH_USER")
15
- #ELASTICSEARCH_PASSWORD = os.getenv("ELASTICSEARCH_PASSWORD")
16
- #OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
17
- #AWS_LAMBDA_URL = os.getenv("AWS_LAMBDA_URL")
18
- GRADIO_AUTH_USERNAME = os.getenv("GRADIO_AUTH_USERNAME")
19
- GRADIO_AUTH_PASSWORD = os.getenv("GRADIO_AUTH_PASSWORD")
20
-
21
- # Check required env vars for local development only
22
- if not os.getenv("SPACE_ID"):
23
- missing_vars = []
24
- for var in ["GRADIO_AUTH_USERNAME", "GRADIO_AUTH_PASSWORD"]:
25
- if not os.getenv(var):
26
- missing_vars.append(var)
27
- if missing_vars:
28
- print(f"Warning: Missing auth environment variables for local development: {', '.join(missing_vars)}")
29
-
30
- es = None
31
-
32
- # Initialize OpenAI
33
- #openai_client = OpenAI(api_key=OPENAI_API_KEY)
34
- def chat_completion(messages, model="gpt-3.5-turbo", temperature=0.1):
35
- #return openai_client.chat.completions.create(
36
- # model=model,
37
- # messages=messages,
38
- # temperature=temperature
39
- #)
40
- return None
41
-
42
- def process_faq(question, user_id="anonymous", model="claude-sonnet"):
43
- """Process FAQ by calling AWS Lambda function with streaming response"""
44
- try:
45
- # Determine the correct Lambda URL and model parameter based on selection
46
- if model.startswith("nova-"):
47
- lambda_url = "https://tz2ttiieoc5z4aq6pskg24zu740bvqup.lambda-url.us-west-2.on.aws/"
48
- model_param = model.replace("nova-", "") # Extract micro/lite/pro
49
- elif model.startswith("claude-"):
50
- lambda_url = "https://myzano2bfze54q6yqp32wwpj6q0ixpmy.lambda-url.us-west-2.on.aws/"
51
- model_param = model.replace("claude-", "") # Extract haiku/sonnet
52
- else:
53
- return "Error: Invalid model selection"
54
-
55
- # Prepare the request payload
56
- payload = {
57
- "message": question.strip(),
58
- "user_id": user_id,
59
- "model": model_param
60
- }
61
-
62
- print(f"DEBUG: Sending to {lambda_url}")
63
- print(f"DEBUG: Payload: {json.dumps(payload, indent=2)}")
64
-
65
- # Make the API call with streaming
66
- with requests.post(
67
- lambda_url,
68
- headers={"Content-Type": "application/json"},
69
- json=payload,
70
- stream=True
71
- ) as response:
72
- if response.status_code != 200:
73
- return f"Error: Lambda function returned status code {response.status_code}"
74
-
75
- # Process the streaming response
76
- full_response = ""
77
- for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
78
- if chunk:
79
- try:
80
- # Try to parse the chunk as JSON
81
- chunk_data = json.loads(chunk)
82
- if "response" in chunk_data:
83
- chunk_text = chunk_data["response"]
84
- full_response += chunk_text
85
- yield full_response
86
- except json.JSONDecodeError:
87
- # If not JSON, treat as plain text
88
- full_response += chunk
89
- yield full_response
90
-
91
- return full_response
92
-
93
- except Exception as e:
94
- return f"Error processing FAQ: {str(e)}"
95
-
96
- def natural_to_query(natural_query):
97
- """Convert natural language to Elasticsearch query body"""
98
- try:
99
- prompt = f"""Convert the following natural language query into an Elasticsearch query body.\nThe query should be in JSON format and follow Elasticsearch query DSL syntax.\n\nNatural language query: {natural_query}\n\nReturn only the JSON query body, nothing else."""
100
- response = chat_completion([
101
- {"role": "system", "content": "You are an expert in Elasticsearch query DSL. Convert natural language to Elasticsearch queries."},
102
- {"role": "user", "content": prompt}
103
- ], model="gpt-3.5-turbo", temperature=0.1)
104
- # Extract and format the query
105
- if hasattr(response, 'choices'):
106
- # For OpenAI v1.x
107
- content = response.choices[0].message.content.strip()
108
- else:
109
- # For OpenAI v0.x
110
- content = response["choices"][0]["message"]["content"].strip()
111
- try:
112
- query_json = json.loads(content)
113
- return json.dumps(query_json, indent=2)
114
- except json.JSONDecodeError:
115
- return content
116
- except Exception as e:
117
- return f"Error generating query: {str(e)}"
118
-
119
- def execute_elasticsearch_query(query_body):
120
- """Execute the Elasticsearch query"""
121
- try:
122
- # Parse the query body
123
- query_json = json.loads(query_body)
124
-
125
- # Execute the query
126
- response = es.search(
127
- index="your_index_name", # Replace with your actual index name
128
- body=query_json
129
- )
130
-
131
- # Format the response
132
- return json.dumps(response, indent=2)
133
- except json.JSONDecodeError:
134
- return "Error: Invalid JSON query body"
135
- except Exception as e:
136
- return f"Error executing query: {str(e)}"
137
-
138
- # --- Gradio v4.x UI ---
139
- def faq_wrapper(question, user_id, model):
140
- # Gradio expects a non-generator for Interface
141
- result = ""
142
- for chunk in process_faq(question, user_id, model):
143
- result = chunk
144
- return result
145
-
146
- def elasticsearch_generate(natural_input):
147
- return natural_to_query(natural_input)
148
-
149
- def elasticsearch_execute(query_body):
150
- return execute_elasticsearch_query(query_body)
151
-
152
- with gr.Blocks() as demo:
153
- gc.Markdown("# MCP Tools - Local Version")
154
- with gr.Tab(label="FAQ"): # type: ignore
155
- faq_input = gc.Textbox(label="Enter your question", lines=3)
156
- model_selector = gc.Dropdown(
157
- label="Select Model",
158
- choices=["nova-micro", "nova-pro", "claude-haiku", "claude-sonnet"],
159
- value="claude-sonnet",
160
- interactive=True
161
- )
162
- # Generate random user ID for this session
163
- session_user_id = str(uuid.uuid4())[:8]
164
- faq_button = gc.Button("Process")
165
- faq_output = gc.Textbox(label="Response", lines=10)
166
- with gr.Row(): # type: ignore
167
- thumbs_down = gc.Button("Report bad response", elem_id="thumbs-down", interactive=True)
168
- feedback_msg = gc.Markdown(visible=False)
169
-
170
- def report_bad_response():
171
- return gr.update(value="Bad response reported. Thank you for your feedback.", visible=True), gr.update(interactive=False)
172
-
173
- thumbs_down.click(report_bad_response, outputs=[feedback_msg, thumbs_down])
174
-
175
- # Re-enable report button and clear feedback when a new FAQ is processed
176
- def reset_feedback(*args):
177
- return gr.update(interactive=True), gr.update(value="", visible=False)
178
- faq_button.click(reset_feedback, outputs=[thumbs_down, feedback_msg], preprocess=False)
179
- faq_button.click(lambda q, m: faq_wrapper(q, session_user_id, m), inputs=[faq_input, model_selector], outputs=faq_output)
180
- with gr.Tab(label="Elasticsearch"): # type: ignore
181
- gc.Markdown("### Step 1: Natural Language to Query")
182
- natural_input = gc.Textbox(label="Describe what you want to search for", lines=3, placeholder="Example: Find all documents containing 'machine learning' in the title")
183
- generate_button = gc.Button("Generate Query")
184
- query_output = gc.Textbox(label="Generated Query Body", lines=10, placeholder="The generated Elasticsearch query will appear here")
185
- generate_button.click(elasticsearch_generate, inputs=natural_input, outputs=query_output)
186
- gc.Markdown("### Step 2: Execute Query")
187
- gc.Markdown("You can modify the query above if needed, then click Execute")
188
- execute_button = gc.Button("Execute Query")
189
- result_output = gc.Textbox(label="Query Results", lines=10, placeholder="The query results will appear here")
190
- execute_button.click(elasticsearch_execute, inputs=query_output, outputs=result_output)
191
-
192
- if __name__ == "__main__":
193
- # Check if running in Hugging Face Spaces
194
- is_spaces = os.getenv("SPACE_ID") is not None
195
-
196
- # Configure launch parameters for Spaces
197
- if is_spaces:
198
- launch_params = {
199
- "server_name": "0.0.0.0",
200
- "server_port": int(os.getenv("PORT", 7860)),
201
- "share": False
202
- }
203
- else:
204
- # Local development with auth
205
- auth_username = GRADIO_AUTH_USERNAME
206
- auth_password = GRADIO_AUTH_PASSWORD
207
- launch_params = {
208
- "server_name": "0.0.0.0",
209
- "server_port": int(os.getenv("PORT", 7860)),
210
- "share": True,
211
- "auth": (auth_username, auth_password),
212
- "auth_message": "Please enter your credentials to access the application."
213
- }
214
-
215
- # Launch the app
216
- demo.launch(**launch_params)