Update app.py
app.py CHANGED
@@ -1,32 +1,22 @@
 from fastapi import FastAPI
 from pydantic import BaseModel
-from
-from huggingface_hub import hf_hub_download
+from huggingface_hub import InferenceClient

 app = FastAPI()
-
-# 1. Define your specific model details
-REPO_ID = "SarmaHighOnAI/physics-tutor-gguf"
-FILENAME = "llama-3.2-3b-instruct.Q4_K_M.gguf"
-
-print("Downloading your fine-tuned model...")
-model_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
-
-print("Loading model...")
-# n_threads=2 ensures it runs smoothly on the free tier CPU
-llm = Llama(model_path=model_path, n_ctx=2048, n_threads=2)
+client = InferenceClient(token="YOUR_HF_TOKEN_HERE")

 class Request(BaseModel):
     prompt: str

 @app.get("/")
 def home():
-    return {"status": "Running"
+    return {"status": "Running"}

 @app.post("/generate")
-def
-
-
-
-
-
+def generate_text(request: Request):
+    response = client.text_generation(
+        request.prompt,
+        model="meta-llama/Llama-3.2-3B-Instruct",
+        max_new_tokens=256
+    )
+    return {"response": response}
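The change replaces the local GGUF setup (hf_hub_download plus a locally loaded Llama(...) instance) with the hosted Inference API via huggingface_hub.InferenceClient, so the Space no longer downloads or loads model weights on the free CPU tier. The new file leaves the token as a placeholder string; a common alternative is to store it as a Space secret and read it from the environment. The sketch below assumes a secret named HF_TOKEN, which is not part of this commit.

import os
from huggingface_hub import InferenceClient

# Read the token from the environment instead of hardcoding it in app.py.
# "HF_TOKEN" is an assumed secret name configured in the Space settings.
client = InferenceClient(token=os.environ.get("HF_TOKEN"))

Once the Space is running, the new /generate route could be exercised with a small client script along these lines; the URL is a placeholder for the actual Space host.

import requests

# Placeholder URL; substitute the real Space endpoint.
resp = requests.post(
    "https://your-space.hf.space/generate",
    json={"prompt": "Explain Newton's second law in one sentence."},
)
print(resp.json()["response"])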