Nexchan committed on
Commit
a83fbed
·
verified ·
1 Parent(s): c7c31e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -1,9 +1,10 @@
1
  from fastapi import FastAPI, Request
2
  from fastapi.responses import HTMLResponse
3
- import gradio as gr
4
  from huggingface_hub import InferenceClient
 
5
  import json
6
  import uvicorn
 
7
 
8
  # Inisialisasi FastAPI
9
  app = FastAPI()
@@ -71,10 +72,13 @@ def gradio_interface():
71
 
72
  return demo
73
 
 
 
 
 
74
  @app.get("/", response_class=HTMLResponse)
75
  async def read_root():
76
- interface = gradio_interface()
77
- return interface.launch(inline=True, share=False)
78
 
79
  @app.post("/chat_llama")
80
  async def chat_llama_endpoint(request: Request):
@@ -100,4 +104,6 @@ async def process_json_endpoint(request: Request):
100
 
101
  # Jalankan server FastAPI
102
  if __name__ == "__main__":
 
 
103
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
  from fastapi import FastAPI, Request
2
  from fastapi.responses import HTMLResponse
 
3
  from huggingface_hub import InferenceClient
4
+ import gradio as gr
5
  import json
6
  import uvicorn
7
+ import threading
8
 
9
  # Inisialisasi FastAPI
10
  app = FastAPI()
 
72
 
73
  return demo
74
 
75
+ def run_gradio():
76
+ interface = gradio_interface()
77
+ interface.launch(server_port=7861, share=False, server_name="0.0.0.0")
78
+
79
  @app.get("/", response_class=HTMLResponse)
80
  async def read_root():
81
+ return "FastAPI is running. Gradio is accessible on port 7861."
 
82
 
83
  @app.post("/chat_llama")
84
  async def chat_llama_endpoint(request: Request):
 
104
 
105
  # Jalankan server FastAPI
106
  if __name__ == "__main__":
107
+ gradio_thread = threading.Thread(target=run_gradio)
108
+ gradio_thread.start()
109
  uvicorn.run(app, host="0.0.0.0", port=7860)