nyxionlabs committed on
Commit
8769b06
·
verified ·
1 Parent(s): feafe1e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -17
app.py CHANGED
@@ -7,14 +7,20 @@ from sentence_transformers import SentenceTransformer
7
  # Optional LLM step (still works without it)
8
  OPENAI_API_KEY = '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'
9
  USE_OPENAI = bool(OPENAI_API_KEY)
 
 
 
10
  if USE_OPENAI:
11
  try:
12
  from openai import OpenAI
13
  oai = OpenAI(api_key=OPENAI_API_KEY)
14
  OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
 
15
  except Exception as e:
16
  print("[RAG] OpenAI import failed:", e)
17
  USE_OPENAI = False
 
 
18
 
19
  # Tunables (can override in Space → Settings → Variables)
20
  MODEL_NAME = os.getenv("EMBED_MODEL", "all-MiniLM-L6-v2")
@@ -32,6 +38,10 @@ def _fallback_corpus():
32
  "Mount Everest is Earth's highest mountain above sea level.",
33
  "Photosynthesis converts light energy into chemical energy in plants.",
34
  "The Nile is a major north-flowing river in northeastern Africa.",
 
 
 
 
35
  ]
36
 
37
  def build_index():
@@ -105,55 +115,96 @@ def answer(question: str, k: int):
105
  build_index()
106
 
107
  if not question.strip():
108
- return "Please enter a question.", [], {"status": "idle"}
109
 
110
  pairs = retrieve(question, k)
111
  if not pairs:
112
- return "No results in index.", [], {"status": "empty"}
113
 
114
  cites = [{"rank": p["rank"], "faiss_dist": round(p["faiss_dist"], 4), "snippet": p["snippet"]} for p in pairs]
115
 
116
  if USE_OPENAI:
117
  prompt = build_prompt(question, pairs)
118
  try:
 
119
  resp = oai.chat.completions.create(
120
  model=OPENAI_MODEL,
121
  messages=[{"role": "user", "content": prompt}],
122
- temperature=0.2
 
123
  )
124
  ans = resp.choices[0].message.content
 
125
  except Exception as e:
126
- ans = f"LLM call failed: {e}\n\nTop result shown below:\n\n{pairs[0]['full'][:MAX_CTX_CHAR]}"
 
127
  else:
128
- ans = ("(No OPENAI_API_KEY set — showing most relevant context instead.)\n\n"
 
129
  + pairs[0]["full"][:MAX_CTX_CHAR])
130
 
131
- return ans, cites, {"status": "ok", "ntotal": STATE['index'].ntotal, "model": MODEL_NAME}
 
 
 
 
 
 
132
 
133
  # ------------------- UI -------------------
134
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
135
- gr.Markdown("## Nyxion Labs · Grounded Q&A (no uploads — builds at startup)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
 
137
  with gr.Row():
138
- q = gr.Textbox(label="Ask a question", placeholder="e.g., What is the capital of France?")
139
- k = gr.Slider(1, 10, value=3, step=1, label="Citations (top-k)")
140
-
141
- btn = gr.Button("Ask")
 
 
 
 
142
  ans = gr.Markdown(label="Answer")
143
- cites = gr.Dataframe(headers=["rank", "faiss_dist", "snippet"], datatype=["number","number","str"],
144
- row_count=(0, "dynamic"), label="Retrieved contexts")
145
- meta = gr.JSON(label="Status")
 
 
 
 
146
 
147
  def _startup():
148
  try:
149
  msg = build_index()
 
 
 
 
 
 
150
  except Exception as e:
151
- msg = f"Startup build failed: {e}"
152
- return {"status": msg}
153
 
154
  demo.load(_startup, inputs=None, outputs=meta)
155
  btn.click(answer, [q, k], [ans, cites, meta])
 
156
 
157
  if __name__ == "__main__":
158
  build_index()
159
- demo.launch()
 
7
  # Optional LLM step (still works without it)
8
  OPENAI_API_KEY = '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'
9
  USE_OPENAI = bool(OPENAI_API_KEY)
10
+
11
+ print(f"[RAG] OPENAI_API_KEY found: {bool(OPENAI_API_KEY)}")
12
+
13
  if USE_OPENAI:
14
  try:
15
  from openai import OpenAI
16
  oai = OpenAI(api_key=OPENAI_API_KEY)
17
  OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
18
+ print(f"[RAG] OpenAI initialized with model: {OPENAI_MODEL}")
19
  except Exception as e:
20
  print("[RAG] OpenAI import failed:", e)
21
  USE_OPENAI = False
22
+ else:
23
+ print("[RAG] No OpenAI API key detected. Set OPENAI_API_KEY in Space Settings.")
24
 
25
  # Tunables (can override in Space → Settings → Variables)
26
  MODEL_NAME = os.getenv("EMBED_MODEL", "all-MiniLM-L6-v2")
 
38
  "Mount Everest is Earth's highest mountain above sea level.",
39
  "Photosynthesis converts light energy into chemical energy in plants.",
40
  "The Nile is a major north-flowing river in northeastern Africa.",
41
+ "Berlin is the capital and largest city of Germany.",
42
+ "Tokyo is the capital of Japan and one of the world's most populous cities.",
43
+ "The Great Wall of China is one of the most famous landmarks in the world.",
44
+ "DNA contains the genetic instructions for all living organisms.",
45
  ]
46
 
47
  def build_index():
 
115
  build_index()
116
 
117
  if not question.strip():
118
+ return "Please enter a question.", [], {"status": "idle", "openai_enabled": USE_OPENAI}
119
 
120
  pairs = retrieve(question, k)
121
  if not pairs:
122
+ return "No results in index.", [], {"status": "empty", "openai_enabled": USE_OPENAI}
123
 
124
  cites = [{"rank": p["rank"], "faiss_dist": round(p["faiss_dist"], 4), "snippet": p["snippet"]} for p in pairs]
125
 
126
  if USE_OPENAI:
127
  prompt = build_prompt(question, pairs)
128
  try:
129
+ print(f"[RAG] Calling OpenAI with model: {OPENAI_MODEL}")
130
  resp = oai.chat.completions.create(
131
  model=OPENAI_MODEL,
132
  messages=[{"role": "user", "content": prompt}],
133
+ temperature=0.2,
134
+ max_tokens=500
135
  )
136
  ans = resp.choices[0].message.content
137
+ print(f"[RAG] OpenAI response received successfully")
138
  except Exception as e:
139
+ print(f"[RAG] LLM call failed: {e}")
140
+ ans = f"❌ LLM call failed: {e}\n\n**Top result shown below:**\n\n{pairs[0]['full'][:MAX_CTX_CHAR]}"
141
  else:
142
+ ans = ("⚠️ **No OPENAI_API_KEY set** — Add it in Space Settings → Repository secrets\n\n"
143
+ "**Showing most relevant context instead:**\n\n"
144
  + pairs[0]["full"][:MAX_CTX_CHAR])
145
 
146
+ return ans, cites, {
147
+ "status": "ok",
148
+ "ntotal": STATE['index'].ntotal,
149
+ "model": MODEL_NAME,
150
+ "openai_enabled": USE_OPENAI,
151
+ "openai_model": OPENAI_MODEL if USE_OPENAI else None
152
+ }
153
 
154
  # ------------------- UI -------------------
155
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
156
+ gr.Markdown("""
157
+ ## Nyxion Labs · Grounded Q&A (RAG Demo)
158
+
159
+ Ask questions and get answers grounded in context with citations.
160
+ """)
161
+
162
+ if not USE_OPENAI:
163
+ gr.Markdown("""
164
+ ⚠️ **OpenAI API Key Not Detected**
165
+
166
+ To enable AI-generated answers:
167
+ 1. Go to Space Settings
168
+ 2. Add `OPENAI_API_KEY` as a repository secret
169
+ 3. Restart the Space
170
+
171
+ Currently showing raw context retrieval only.
172
+ """)
173
 
174
  with gr.Row():
175
+ q = gr.Textbox(
176
+ label="Ask a question",
177
+ placeholder="e.g., What is the capital of Germany?",
178
+ lines=2
179
+ )
180
+ k = gr.Slider(1, 10, value=3, step=1, label="Number of Citations (top-k)")
181
+
182
+ btn = gr.Button("πŸ” Ask", variant="primary")
183
  ans = gr.Markdown(label="Answer")
184
+ cites = gr.Dataframe(
185
+ headers=["rank", "faiss_dist", "snippet"],
186
+ datatype=["number","number","str"],
187
+ row_count=(0, "dynamic"),
188
+ label="Retrieved Contexts"
189
+ )
190
+ meta = gr.JSON(label="System Status")
191
 
192
  def _startup():
193
  try:
194
  msg = build_index()
195
+ return {
196
+ "status": msg,
197
+ "openai_enabled": USE_OPENAI,
198
+ "openai_model": OPENAI_MODEL if USE_OPENAI else None,
199
+ "embed_model": MODEL_NAME
200
+ }
201
  except Exception as e:
202
+ return {"status": f"Startup build failed: {e}", "openai_enabled": False}
 
203
 
204
  demo.load(_startup, inputs=None, outputs=meta)
205
  btn.click(answer, [q, k], [ans, cites, meta])
206
+ q.submit(answer, [q, k], [ans, cites, meta]) # Allow Enter key to submit
207
 
208
  if __name__ == "__main__":
209
  build_index()
210
+ demo.launch()