lydiasolomon committed
Commit 5fbb27d · verified · 1 Parent(s): 0a3060f

Update main.py

Files changed (1):
  1. main.py +192 -44
main.py CHANGED
@@ -1,14 +1,17 @@
 import os
 import tempfile
 import logging
 import traceback
-from fastapi import FastAPI, UploadFile, File, Header, HTTPException, Body
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
 from transformers import pipeline
 from langdetect import detect, DetectorFactory
 from PIL import Image
-from smebuilder_vector import retriever  # Your vector retrieval module

 # ==============================
 # Logging Setup
@@ -16,10 +19,13 @@ from smebuilder_vector import retriever  # Your vector retrieval module
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger("DevAssist")

 # ==============================
 # App Init
 # ==============================
-app = FastAPI(title="DevAssist AI Backend")

 # ==============================
 # Config
@@ -27,17 +33,25 @@ app = FastAPI(title="DevAssist AI Backend")
 DetectorFactory.seed = 0
 PROJECT_API_KEY = os.getenv("PROJECT_API_KEY")
 SPITCH_API_KEY = os.getenv("SPITCH_API_KEY")
 HF_MODELS = {
-    "chat": "bigcode/starcoderbase",
-    "autodoc": "Salesforce/codegen-2B-mono",
-    "sme": "deepseek-ai/deepseek-coder-1.3b-instruct"
 }

 if not SPITCH_API_KEY:
     raise RuntimeError("Set SPITCH_API_KEY in environment before starting.")

 # ==============================
-# Auth Check
 # ==============================
 def check_auth(authorization: str | None):
     if not PROJECT_API_KEY:
@@ -49,15 +63,15 @@ def check_auth(authorization: str | None):
     raise HTTPException(status_code=403, detail="Invalid token")

 # ==============================
-# Global Exception Handler
 # ==============================
 @app.exception_handler(Exception)
-async def global_exception_handler(request, exc: Exception):
-    logger.error(f"Unhandled error: {exc}")
     return JSONResponse(status_code=500, content={"error": str(exc)})

 # ==============================
-# Request Models
 # ==============================
 class ChatRequest(BaseModel):
     question: str
@@ -68,53 +82,122 @@ class AutoDocRequest(BaseModel):
 class SMERequest(BaseModel):
     user_prompt: str

 # ==============================
-# Pipeline Loader
 # ==============================
 def load_pipeline(task: str, model_name: str, fallback: str = None):
     try:
         return pipeline(task, model=model_name)
     except Exception as e:
-        logger.warning(f"Failed to load {model_name}: {e}")
         if fallback:
-            logger.info(f"Falling back to {fallback}")
             return pipeline(task, model=fallback)
-        raise e

 # ==============================
-# Pipelines
 # ==============================
-chat_pipe = load_pipeline("text-generation", HF_MODELS["chat"], "gpt2")
-autodoc_pipe = load_pipeline("text-generation", HF_MODELS["autodoc"], "gpt2")
-sme_pipe = load_pipeline("text-generation", HF_MODELS["sme"], "gpt2")

 # ==============================
-# Helper Functions
 # ==============================
-def run_pipeline(pipe, prompt: str):
     try:
-        output_list = pipe(prompt, max_new_tokens=1024, do_sample=True)
-        text = output_list[0].get("generated_text", "").strip() if isinstance(output_list, list) else str(output_list)

-        # Log prompt + output
-        logger.info(f"Prompt:\n{prompt}\n--- Output:\n{text}\n--- End")
         if not text:
             return {"success": False, "error": "⚠️ LLM returned empty output", "prompt": prompt}
         return text
     except Exception as e:
-        logger.error(f"Pipeline error: {e}")
-        return {"success": False, "error": f"⚠️ LLM error: {str(e)}", "prompt": prompt, "trace": traceback.format_exc()}

 # ==============================
-# Audio Processing Helper
 # ==============================
 async def process_audio(file: UploadFile, lang_hint: str | None = None):
-    import spitch
-    spitch_client = spitch.Spitch()
     suffix = os.path.splitext(file.filename)[1] or ".wav"
     with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tf:
         tf.write(await file.read())
         tmp_path = tf.name
     with open(tmp_path, "rb") as f:
         audio_bytes = f.read()
@@ -124,6 +207,7 @@ async def process_audio(file: UploadFile, lang_hint: str | None = None):
         else:
             resp = spitch_client.speech.transcribe(content=audio_bytes)
     except Exception:
         resp = spitch_client.speech.transcribe(language="en", content=audio_bytes)

     transcription = getattr(resp, "text", "") or (resp.get("text", "") if isinstance(resp, dict) else "")
@@ -131,13 +215,13 @@ async def process_audio(file: UploadFile, lang_hint: str | None = None):
     try:
         detected_lang = detect(transcription) if transcription.strip() else "en"
     except Exception:
-        pass

     translation = transcription
     if detected_lang != "en":
         try:
             translation_resp = spitch_client.text.translate(text=transcription, source=detected_lang, target="en")
-            translation = getattr(translation_resp, "text", "") or translation_resp.get("text", "")
         except Exception:
             translation = transcription

@@ -148,43 +232,72 @@ async def process_audio(file: UploadFile, lang_hint: str | None = None):
 # ==============================
 @app.get("/")
 async def root_endpoint():
-    return {"status": "✅ DevAssist AI Backend running"}

 @app.post("/chat")
 async def chat_endpoint(req: ChatRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
-    prompt = f"You are a professional coding assistant. Answer clearly:\nQuestion: {req.question}\nAnswer:"
-    result = run_pipeline(chat_pipe, prompt)
     return result if isinstance(result, dict) else {"reply": result}

 @app.post("/autodoc")
 async def autodoc_endpoint(req: AutoDocRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
-    prompt = f"Generate professional documentation for the following code in Markdown:\n{req.code}\nDocumentation:"
-    result = run_pipeline(autodoc_pipe, prompt)
     return result if isinstance(result, dict) else {"documentation": result}

 @app.post("/sme/generate")
 async def sme_generate_endpoint(req: SMERequest, authorization: str | None = Header(None)):
     check_auth(authorization)
     try:
-        context_docs = retriever.get_relevant_documents(req.user_prompt)
         context = "\n".join([doc.page_content for doc in context_docs]) if context_docs else "No extra context"
-        prompt = f"Generate production-ready frontend code based on this prompt:\n{req.user_prompt}\nContext:\n{context}\nOutput:"
-        result = run_pipeline(sme_pipe, prompt)
         return {"success": True, "data": result if isinstance(result, str) else result.get("reply", "")}
     except Exception as e:
         return {"success": False, "error": f"⚠️ LLM error: {str(e)}", "trace": traceback.format_exc()}

 @app.post("/sme/speech-generate")
 async def sme_speech_endpoint(file: UploadFile = File(...), lang_hint: str | None = None, authorization: str | None = Header(None)):
     check_auth(authorization)
     transcription, detected_lang, translation = await process_audio(file, lang_hint)
     try:
-        context_docs = retriever.get_relevant_documents(translation)
         context = "\n".join([doc.page_content for doc in context_docs]) if context_docs else "No extra context"
-        prompt = f"Generate production-ready frontend code based on this prompt:\n{translation}\nContext:\n{context}\nOutput:"
-        result = run_pipeline(sme_pipe, prompt)
         return {
             "success": True,
             "transcription": transcription,
@@ -193,11 +306,46 @@ async def sme_speech_endpoint(file: UploadFile = File(...), lang_hint: str | Non
             "sme_site": result if isinstance(result, str) else result.get("reply", "")
         }
     except Exception as e:
         return {"success": False, "error": f"⚠️ LLM error: {str(e)}", "trace": traceback.format_exc()}

 # ==============================
 # Run App
 # ==============================
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=False)

+# main.py
 import os
 import tempfile
 import logging
 import traceback
+from fastapi import FastAPI, UploadFile, File, Header, HTTPException, Body, Request
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
 from transformers import pipeline
 from langdetect import detect, DetectorFactory
 from PIL import Image
+import io
+from smebuilder_vector import retriever  # your existing retriever module
+import spitch

 # ==============================
 # Logging Setup

 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger("DevAssist")

+# Debug log file for prompts + outputs
+DEBUG_LOG_FILE = os.getenv("LLM_DEBUG_LOG", "llm_debug.log")
+
 # ==============================
 # App Init
 # ==============================
+app = FastAPI(title="DevAssist / CuraAI Backend")

 # ==============================
 # Config

 DetectorFactory.seed = 0
 PROJECT_API_KEY = os.getenv("PROJECT_API_KEY")
 SPITCH_API_KEY = os.getenv("SPITCH_API_KEY")
+
+# Models chosen per task (public/reasonable defaults)
 HF_MODELS = {
+    "chat": os.getenv("CHAT_MODEL", "bigcode/starcoderbase"),  # coding assistant
+    "autodoc": os.getenv("AUTODOC_MODEL", "Salesforce/codegen-2B-mono"),  # code -> docs
+    "sme": os.getenv("SME_MODEL", "deepseek-ai/deepseek-coder-1.3b-instruct"),  # frontend generation
+    "image_caption": os.getenv("IMAGE_CAPTION_MODEL", "Salesforce/blip-image-captioning-base")
 }

 if not SPITCH_API_KEY:
     raise RuntimeError("Set SPITCH_API_KEY in environment before starting.")

+# Initialize Spitch client once
+spitch_client = spitch.Spitch()
+# Optionally set env var for Spitch API if required by client library
+os.environ["SPITCH_API_KEY"] = SPITCH_API_KEY
+
 # ==============================
+# Authentication helper
 # ==============================
 def check_auth(authorization: str | None):
     if not PROJECT_API_KEY:

     raise HTTPException(status_code=403, detail="Invalid token")

 # ==============================
+# Global exception handler
 # ==============================
 @app.exception_handler(Exception)
+async def global_exception_handler(request: Request, exc: Exception):
+    logger.error(f"Unhandled error: {exc}", exc_info=True)
     return JSONResponse(status_code=500, content={"error": str(exc)})

 # ==============================
+# Request models
 # ==============================
 class ChatRequest(BaseModel):
     question: str

 class SMERequest(BaseModel):
     user_prompt: str

+# For simple vector search API
+class VectorRequest(BaseModel):
+    query: str
+
 # ==============================
+# Pipeline loader with fallback
 # ==============================
 def load_pipeline(task: str, model_name: str, fallback: str = None):
+    """
+    Load a HuggingFace pipeline with a fallback option.
+    Keep the load minimal (no device_map here; set it in the environment for production).
+    """
     try:
+        logger.info(f"Loading pipeline task={task} model={model_name}")
         return pipeline(task, model=model_name)
     except Exception as e:
+        logger.warning(f"Failed to load {model_name} for task={task}: {e}")
         if fallback:
+            logger.info(f"Falling back to {fallback} for task={task}")
             return pipeline(task, model=fallback)
+        raise

 # ==============================
+# Pipelines (load on startup)
 # ==============================
+# text-generation pipelines for chat/autodoc/sme
+chat_pipe = load_pipeline("text-generation", HF_MODELS["chat"], fallback="gpt2")
+autodoc_pipe = load_pipeline("text-generation", HF_MODELS["autodoc"], fallback="gpt2")
+sme_pipe = load_pipeline("text-generation", HF_MODELS["sme"], fallback="gpt2")
+
+# image caption / image-to-text pipeline for crop/vision tasks
+image_caption_pipe = load_pipeline("image-to-text", HF_MODELS["image_caption"], fallback="Salesforce/blip-image-captioning-base")

 # ==============================
+# Helper / wrapper functions
 # ==============================
+def debug_log_prompt(prompt: str, output: str, tag: str = "LLM"):
+    try:
+        with open(DEBUG_LOG_FILE, "a", encoding="utf-8") as fh:
+            fh.write(f"=== {tag} PROMPT START ===\n")
+            fh.write(prompt + "\n")
+            fh.write("--- MODEL OUTPUT ---\n")
+            fh.write(output + "\n")
+            fh.write(f"=== {tag} PROMPT END ===\n\n")
+    except Exception:
+        logger.exception("Failed to write debug log")
+
+def run_pipeline(pipe, prompt: str, max_new_tokens: int = 1024):
+    """
+    Run a text-generation pipeline and return text or a structured error.
+    Logs prompt + output to the debug file.
+    """
     try:
+        # call the pipeline (many models return a list with 'generated_text')
+        output_list = pipe(prompt, max_new_tokens=max_new_tokens, do_sample=True)
+        text = ""
+        if isinstance(output_list, list) and len(output_list) > 0:
+            # handle generations that include 'generated_text'
+            first = output_list[0]
+            if isinstance(first, dict) and "generated_text" in first:
+                text = first["generated_text"]
+            else:
+                text = str(first)
+        else:
+            text = str(output_list)
+
+        text = text.strip()
+        debug_log_prompt(prompt, text, tag="TEXT-GEN")
+        logger.info("Prompt executed successfully")

         if not text:
             return {"success": False, "error": "⚠️ LLM returned empty output", "prompt": prompt}
         return text
     except Exception as e:
+        logger.error("Pipeline execution error", exc_info=True)
+        trace = traceback.format_exc()
+        debug_log_prompt(prompt, f"EXCEPTION:\n{trace}", tag="TEXT-GEN")
+        return {"success": False, "error": f"⚠️ LLM error: {str(e)}", "trace": trace, "prompt": prompt}
+
+def run_image_to_text(pipe, image_bytes: bytes, prompt: str):
+    """
+    Run an image-to-text pipeline (image captioning / multimodal).
+    Returns the generated text or an error structure.
+    """
+    try:
+        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+        output_list = pipe(image, prompt=prompt)
+        text = ""
+        if isinstance(output_list, list) and len(output_list) > 0 and isinstance(output_list[0], dict):
+            text = output_list[0].get("generated_text", "")
+        else:
+            text = str(output_list)
+        text = text.strip()
+        debug_log_prompt(prompt, text, tag="IMG-TO-TEXT")
+        if not text:
+            return {"success": False, "error": "⚠️ Vision model returned empty output", "prompt": prompt}
+        return text
+    except Exception as e:
+        logger.exception("Image-to-text pipeline error")
+        trace = traceback.format_exc()
+        debug_log_prompt(prompt, f"EXCEPTION:\n{trace}", tag="IMG-TO-TEXT")
+        return {"success": False, "error": f"⚠️ Vision model error: {str(e)}", "trace": trace, "prompt": prompt}

 # ==============================
+# Audio processing (Spitch) helper
 # ==============================
 async def process_audio(file: UploadFile, lang_hint: str | None = None):
+    """
+    Save the audio temporarily, transcribe it via the Spitch client, detect the language,
+    and optionally translate to English.
+    Returns (transcription, detected_lang, translation).
+    """
     suffix = os.path.splitext(file.filename)[1] or ".wav"
     with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tf:
         tf.write(await file.read())
         tmp_path = tf.name
+
     with open(tmp_path, "rb") as f:
         audio_bytes = f.read()

         else:
             resp = spitch_client.speech.transcribe(content=audio_bytes)
     except Exception:
+        # fall back to English if Spitch fails with the given hint
         resp = spitch_client.speech.transcribe(language="en", content=audio_bytes)

     transcription = getattr(resp, "text", "") or (resp.get("text", "") if isinstance(resp, dict) else "")

     try:
         detected_lang = detect(transcription) if transcription.strip() else "en"
     except Exception:
+        detected_lang = "en"

     translation = transcription
     if detected_lang != "en":
         try:
             translation_resp = spitch_client.text.translate(text=transcription, source=detected_lang, target="en")
+            translation = getattr(translation_resp, "text", "") or translation_resp.get("text", "") or transcription
         except Exception:
             translation = transcription

 # ==============================
 @app.get("/")
 async def root_endpoint():
+    return {"status": "✅ DevAssist / CuraAI Backend running"}

+# ----- Chat: coding assistant -----
 @app.post("/chat")
 async def chat_endpoint(req: ChatRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
+    # prompt template tuned for coding Q&A
+    prompt = (
+        "You are DevAssist — a helpful, concise coding assistant. "
+        f"Answer clearly with code samples if relevant.\n\nQuestion:\n{req.question}\n\nAnswer:"
+    )
+    result = run_pipeline(chat_pipe, prompt, max_new_tokens=512)
     return result if isinstance(result, dict) else {"reply": result}

+# ----- Autodoc: code -> documentation -----
 @app.post("/autodoc")
 async def autodoc_endpoint(req: AutoDocRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
+    prompt = (
+        "You are DevAssist DocBot. Produce professional Markdown documentation for the provided code.\n\n"
+        f"Code:\n{req.code}\n\nDocumentation:"
+    )
+    result = run_pipeline(autodoc_pipe, prompt, max_new_tokens=512)
     return result if isinstance(result, dict) else {"documentation": result}

+# ----- SME: production-ready frontend generation (with retriever context) -----
 @app.post("/sme/generate")
 async def sme_generate_endpoint(req: SMERequest, authorization: str | None = Header(None)):
     check_auth(authorization)
     try:
+        # Use the retriever for context injection (keep the old method for compatibility)
+        try:
+            context_docs = retriever.get_relevant_documents(req.user_prompt)
+        except AttributeError:
+            # newer retriever APIs use .invoke
+            context_docs = retriever.invoke(req.user_prompt)
+
         context = "\n".join([doc.page_content for doc in context_docs]) if context_docs else "No extra context"
+        prompt = (
+            "You are a senior full-stack engineer. "
+            "Generate production-ready frontend code (index.html, styles.css, script.js) "
+            f"based on the prompt:\n{req.user_prompt}\n\nContext:\n{context}\n\nOutput:"
+        )
+        result = run_pipeline(sme_pipe, prompt, max_new_tokens=1500)
         return {"success": True, "data": result if isinstance(result, str) else result.get("reply", "")}
     except Exception as e:
+        logger.exception("SME generate endpoint error")
         return {"success": False, "error": f"⚠️ LLM error: {str(e)}", "trace": traceback.format_exc()}

+# ----- SME speech generate: STT -> SME -----
 @app.post("/sme/speech-generate")
 async def sme_speech_endpoint(file: UploadFile = File(...), lang_hint: str | None = None, authorization: str | None = Header(None)):
     check_auth(authorization)
     transcription, detected_lang, translation = await process_audio(file, lang_hint)
     try:
+        try:
+            context_docs = retriever.get_relevant_documents(translation)
+        except AttributeError:
+            context_docs = retriever.invoke(translation)
+
         context = "\n".join([doc.page_content for doc in context_docs]) if context_docs else "No extra context"
+        prompt = (
+            "You are a senior full-stack engineer. Generate production-ready frontend code "
+            f"based on the prompt:\n{translation}\n\nContext:\n{context}\n\nOutput:"
+        )
+        result = run_pipeline(sme_pipe, prompt, max_new_tokens=1500)
         return {
             "success": True,
             "transcription": transcription,

             "sme_site": result if isinstance(result, str) else result.get("reply", "")
         }
     except Exception as e:
+        logger.exception("SME speech-generate error")
         return {"success": False, "error": f"⚠️ LLM error: {str(e)}", "trace": traceback.format_exc()}

+# ----- Vision/crop-doctor style endpoint (image + text -> diagnosis / explanation) -----
+@app.post("/vision/diagnose")
+async def vision_diagnose(symptoms: str = Header(...), image: UploadFile = File(...), authorization: str | None = Header(None)):
+    """
+    Use an image-to-text model (BLIP) to analyze an image plus the farmer's description,
+    then produce a simple diagnosis & treatment plan. Returns a string or an error object.
+    """
+    check_auth(authorization)
+    image_bytes = await image.read()
+    prompt = (
+        f"Farmer reports: {symptoms}. Analyze this plant image, diagnose the likely disease, "
+        "provide simple treatment steps and short prevention advice in plain language."
+    )
+    result = run_image_to_text(image_caption_pipe, image_bytes, prompt)
+    return {"diagnosis": result} if isinstance(result, str) else result
+
+# ----- Vector search wrapper endpoint -----
+@app.post("/vector-search")
+async def vector_search(req: VectorRequest, authorization: str | None = Header(None)):
+    check_auth(authorization)
+    try:
+        # query the existing retriever from smebuilder_vector
+        try:
+            results = retriever.get_relevant_documents(req.query)
+        except AttributeError:
+            # fall back to .invoke if the retriever API differs
+            results = retriever.invoke(req.query)
+        # normalize to a simple list response
+        brief = [{"page_content": getattr(r, "page_content", str(r)), "meta": getattr(r, "metadata", {})} for r in results]
+        return {"results": brief}
+    except Exception as e:
+        logger.exception("Vector search error")
+        return {"error": f"Vector search error: {str(e)}", "trace": traceback.format_exc()}
+
 # ==============================
 # Run App
 # ==============================
 if __name__ == "__main__":
     import uvicorn
+    uvicorn.run("main:app", host="0.0.0.0", port=int(os.getenv("PORT", "7860")), reload=False)
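
For reference, a minimal client sketch for the updated JSON endpoints. This is not part of the commit: the base URL, port, and token value are placeholders, and the exact Authorization scheme depends on the elided body of check_auth (a bearer token is assumed here).

# client_example.py (hypothetical smoke test, not part of this commit)
import os
import requests

BASE_URL = os.getenv("DEVASSIST_URL", "http://localhost:7860")  # placeholder; matches the default uvicorn port
HEADERS = {"Authorization": f"Bearer {os.getenv('PROJECT_API_KEY', 'changeme')}"}  # assumed scheme

# /chat expects a JSON body matching ChatRequest(question: str)
r = requests.post(f"{BASE_URL}/chat", json={"question": "How do I reverse a list in Python?"}, headers=HEADERS)
print(r.json())  # {"reply": ...} on success, or a structured error dict

# /sme/generate expects a JSON body matching SMERequest(user_prompt: str)
r = requests.post(f"{BASE_URL}/sme/generate", json={"user_prompt": "Landing page for a bakery"}, headers=HEADERS)
print(r.json())  # {"success": True, "data": ...}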
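
The multipart endpoints differ slightly: /sme/speech-generate takes the audio as a file field named "file", while /vision/diagnose reads the farmer's description from a "symptoms" request header (per its Header(...) parameter) and the image from a file field named "image". A hedged sketch with illustrative file names:

# multipart_example.py (hypothetical, not part of this commit)
import requests

BASE_URL = "http://localhost:7860"  # placeholder
HEADERS = {"Authorization": "Bearer changeme"}  # assumed scheme, see note above

# Speech -> SME: upload audio, receive transcription + generated site
with open("request.wav", "rb") as f:
    r = requests.post(f"{BASE_URL}/sme/speech-generate",
                      files={"file": ("request.wav", f, "audio/wav")},
                      params={"lang_hint": "en"},  # optional; scalar params become query parameters in FastAPI
                      headers=HEADERS)
print(r.json())

# Vision diagnose: image upload plus symptoms header
with open("leaf.jpg", "rb") as f:
    r = requests.post(f"{BASE_URL}/vision/diagnose",
                      files={"image": ("leaf.jpg", f, "image/jpeg")},
                      headers={**HEADERS, "symptoms": "yellow spots on leaves"})
print(r.json())  # {"diagnosis": ...} or a structured error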
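
The get_relevant_documents/invoke fallback used in /sme/generate, /sme/speech-generate, and /vector-search assumes a LangChain-style retriever. The real smebuilder_vector module is not shown in this commit; a hypothetical stand-in that satisfies both call paths could look like this:

# hypothetical stand-in for smebuilder_vector.retriever (illustration only)
from dataclasses import dataclass, field

@dataclass
class Document:
    page_content: str
    metadata: dict = field(default_factory=dict)

class KeywordRetriever:
    """Naive keyword matcher; the real module would do vector similarity search."""
    def __init__(self, docs):
        self.docs = docs

    def invoke(self, query: str):
        words = query.lower().split()
        return [d for d in self.docs if any(w in d.page_content.lower() for w in words)]

    def get_relevant_documents(self, query: str):
        # older LangChain retriever method name; kept so either call path in main.py works
        return self.invoke(query)

retriever = KeywordRetriever([Document("Example snippet about responsive navbars")])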