Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,14 +1,25 @@
|
|
| 1 |
-
from fastapi import FastAPI, Request
|
| 2 |
-
from
|
| 3 |
-
import json
|
| 4 |
from huggingface_hub import InferenceClient
|
|
|
|
|
|
|
| 5 |
|
| 6 |
app = FastAPI()
|
| 7 |
|
| 8 |
# Inisialisasi HuggingFace client
|
| 9 |
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
|
| 10 |
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
chat_completion = client.chat_completion(
|
| 13 |
messages=chat_history,
|
| 14 |
max_tokens=500,
|
|
@@ -16,12 +27,11 @@ def chat_llama(chat_history):
|
|
| 16 |
chat_history.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
|
| 17 |
return chat_history
|
| 18 |
|
| 19 |
-
def chat_mem(message, chat_history):
|
| 20 |
chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
chat_history_role.append({"role": "assistant", "content": chat_history[i][1]})
|
| 25 |
chat_history_role.append({"role": "user", "content": message})
|
| 26 |
|
| 27 |
chat_completion = client.chat_completion(
|
|
@@ -36,7 +46,7 @@ def chat_mem(message, chat_history):
|
|
| 36 |
|
| 37 |
return "", chat_history
|
| 38 |
|
| 39 |
-
def process_json(json_input):
|
| 40 |
try:
|
| 41 |
chat_history = json.loads(json_input)
|
| 42 |
if not isinstance(chat_history, list):
|
|
@@ -48,34 +58,24 @@ def process_json(json_input):
|
|
| 48 |
return json.dumps(chat_history, indent=2), ""
|
| 49 |
|
| 50 |
@app.post("/chat_llama")
|
| 51 |
-
async def chat_llama_endpoint(request:
|
| 52 |
-
|
| 53 |
-
chat_history = data.get('chat_history', [])
|
| 54 |
response = chat_llama(chat_history)
|
| 55 |
-
return
|
| 56 |
|
| 57 |
@app.post("/chat_mem")
|
| 58 |
-
async def chat_mem_endpoint(request:
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
chat_history = data.get('chat_history', [])
|
| 62 |
response = chat_mem(message, chat_history)
|
| 63 |
-
return
|
| 64 |
|
| 65 |
@app.post("/process_json")
|
| 66 |
-
async def process_json_endpoint(request:
|
| 67 |
-
|
| 68 |
-
json_input = data.get('json_input', '')
|
| 69 |
response = process_json(json_input)
|
| 70 |
-
return
|
| 71 |
-
|
| 72 |
-
def schedule_task():
|
| 73 |
-
print("Scheduled task is running")
|
| 74 |
-
|
| 75 |
-
# Tambahkan tugas terjadwal menggunakan univcron
|
| 76 |
-
univcron.every(10).seconds.do(schedule_task)
|
| 77 |
|
| 78 |
if __name__ == "__main__":
|
| 79 |
import uvicorn
|
| 80 |
-
# Jalankan server FastAPI
|
| 81 |
uvicorn.run(app, host="0.0.0.0", port=7860)
|
|
|
|
| 1 |
+
from fastapi import FastAPI, Request, HTTPException
|
| 2 |
+
from pydantic import BaseModel
|
|
|
|
| 3 |
from huggingface_hub import InferenceClient
|
| 4 |
+
import json
|
| 5 |
+
from typing import List, Dict, Tuple
|
| 6 |
|
| 7 |
app = FastAPI()

# Initialize the HuggingFace inference client for the Llama-3 8B instruct chat model
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
|
| 11 |
|
| 12 |
+
class ChatRequest(BaseModel):
    """Request body for POST /chat_llama: the full chat transcript to continue."""
    # OpenAI-style message dicts, e.g. {"role": "user", "content": "..."}
    chat_history: List[Dict[str, str]]
|
| 14 |
+
|
| 15 |
+
class ChatMemRequest(BaseModel):
    """Request body for POST /chat_mem: a new user message plus prior turns."""
    message: str
    # Each tuple is one completed (user_message, assistant_message) exchange;
    # chat_mem() unpacks it as `for user_msg, assistant_msg in chat_history`.
    chat_history: List[Tuple[str, str]]
|
| 18 |
+
|
| 19 |
+
class ProcessJsonRequest(BaseModel):
    """Request body for POST /process_json: a raw JSON string to parse."""
    # Expected to decode to a JSON list (process_json checks isinstance(..., list))
    json_input: str
|
| 21 |
+
|
| 22 |
+
def chat_llama(chat_history: List[Dict[str, str]]) -> List[Dict[str, str]]:
|
| 23 |
chat_completion = client.chat_completion(
|
| 24 |
messages=chat_history,
|
| 25 |
max_tokens=500,
|
|
|
|
| 27 |
chat_history.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
|
| 28 |
return chat_history
|
| 29 |
|
| 30 |
+
def chat_mem(message: str, chat_history: List[Tuple[str, str]]) -> Tuple[str, List[Tuple[str, str]]]:
|
| 31 |
chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
|
| 32 |
+
for user_msg, assistant_msg in chat_history:
|
| 33 |
+
chat_history_role.append({"role": "user", "content": user_msg})
|
| 34 |
+
chat_history_role.append({"role": "assistant", "content": assistant_msg})
|
|
|
|
| 35 |
chat_history_role.append({"role": "user", "content": message})
|
| 36 |
|
| 37 |
chat_completion = client.chat_completion(
|
|
|
|
| 46 |
|
| 47 |
return "", chat_history
|
| 48 |
|
| 49 |
+
def process_json(json_input: str) -> Tuple[str, str]:
|
| 50 |
try:
|
| 51 |
chat_history = json.loads(json_input)
|
| 52 |
if not isinstance(chat_history, list):
|
|
|
|
| 58 |
return json.dumps(chat_history, indent=2), ""
|
| 59 |
|
| 60 |
@app.post("/chat_llama")
async def chat_llama_endpoint(request: ChatRequest):
    """Run one chat completion over the submitted transcript.

    Delegates to chat_llama(), which appends the assistant's reply to the
    history it was given, and returns that updated history.
    """
    updated_history = chat_llama(request.chat_history)
    return {"chat_history": updated_history}
|
| 65 |
|
| 66 |
@app.post("/chat_mem")
async def chat_mem_endpoint(request: ChatMemRequest):
    """Continue a (user, assistant) paired history with a new user message.

    chat_mem() returns a 2-tuple; index 0 is the message slot and index 1
    the updated history, which we expose under explicit JSON keys.
    """
    result = chat_mem(request.message, request.chat_history)
    return {"message": result[0], "chat_history": result[1]}
|
| 72 |
|
| 73 |
@app.post("/process_json")
async def process_json_endpoint(request: ProcessJsonRequest):
    """Parse/normalize a raw JSON payload via process_json().

    process_json() returns (pretty_printed_json, error_message); exactly one
    of the two is non-empty depending on whether parsing succeeded.
    """
    output_json, error_message = process_json(request.json_input)
    return {"output_json": output_json, "error_message": error_message}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
|
| 79 |
if __name__ == "__main__":
    import uvicorn
    # Serve the FastAPI app on all interfaces, port 7860
    uvicorn.run(app, host="0.0.0.0", port=7860)
|