SarmaHighOnAI committed on
Commit
c1b648c
·
verified ·
1 Parent(s): 9c9862c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -28
app.py CHANGED
@@ -1,35 +1,22 @@
1
  from fastapi import FastAPI
2
- from pydantic import BaseModel
3
- from llama_cpp import Llama
4
- from huggingface_hub import hf_hub_download
5
 
6
  app = FastAPI()
7
 
8
- # 1. Define your specific model details
9
- REPO_ID = "SarmaHighOnAI/physics-tutor-gguf"
10
- FILENAME = "llama-3.2-3b-instruct.Q4_K_M.gguf"
11
-
12
- print("Downloading your fine-tuned model...")
13
- # This downloads the file LOCALLY to the container
14
- model_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
15
-
16
- print("Loading model into memory...")
17
- # This loads the 'brain' locally. n_threads=2 is safe for the free tier.
18
- llm = Llama(model_path=model_path, n_ctx=2048, n_threads=2)
19
-
20
- class Request(BaseModel):
21
- prompt: str
22
-
23
  @app.get("/")
24
  def home():
25
- return {"status": "Running", "message": "Physics Tutor API is Live (Local Inference)"}
26
-
27
- @app.post("/generate")
28
- def generate(request: Request):
29
- # Standard prompt format
30
- formatted_prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{request.prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
31
-
32
- # Run inference LOCALLY (No API Key needed)
33
- output = llm(formatted_prompt, max_tokens=256, stop=["<|eot_id|>"], echo=False)
34
 
35
- return {"response": output["choices"][0]["text"]}
 
 
 
 
 
 
 
1
  from fastapi import FastAPI
2
+ import subprocess
3
+ import sys
 
4
 
5
  app = FastAPI()
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  @app.get("/")
8
  def home():
9
+ # This forces the server to list all installed packages
10
+ # Check your "Logs" tab after visiting this page!
11
+ result = subprocess.run([sys.executable, "-m", "pip", "freeze"], capture_output=True, text=True)
12
+ print("=== INSTALLED PACKAGES ===")
13
+ print(result.stdout)
14
+ print("==========================")
 
 
 
15
 
16
+ try:
17
+ import llama_cpp
18
+ status = "llama_cpp is INSTALLED!"
19
+ except ImportError as e:
20
+ status = f"CRITICAL ERROR: {e}"
21
+
22
+ return {"status": status, "packages_printed_to_logs": True}