hello-ram committed
Commit af5708f · verified · 1 Parent(s): 52bfa38

Update app.py

Files changed (1)
  app.py +54 -17
app.py CHANGED
```diff
@@ -1,16 +1,20 @@
-from fastapi import FastAPI
-from pydantic import BaseModel
-from fastapi.middleware.cors import CORSMiddleware
-from transformers import AutoTokenizer, AutoModelForCausalLM
+import unsloth  # MUST be imported before transformers
+from unsloth import FastLanguageModel
+from transformers import AutoTokenizer
 import torch
 import os
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel
 
-# Make HF Spaces writable
+# Optional: HF cache
 os.environ["HF_HOME"] = "/tmp"
 os.environ["TRANSFORMERS_CACHE"] = "/tmp"
 
+# -------------------------------
 # FastAPI app
-app = FastAPI(title="Unsloth GPT API")
+# -------------------------------
+app = FastAPI(title="Unsloth GPT OSS API")
 
 app.add_middleware(
     CORSMiddleware,
@@ -19,20 +23,46 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
+# -------------------------------
 # Model variables
+# -------------------------------
 model = None
 tokenizer = None
-model_id = "hello-ram/unsolth_gpt.20"
 
+# Paths
+base_model_name = "unsloth/gpt-oss-20b"  # Pretrained GPT-OSS base
+lora_model_path = "./finetuned_model"    # Your LoRA weights in the Space repo
+
+# -------------------------------
 # Load model lazily
+# -------------------------------
 def load_model():
     global model, tokenizer
     if model is None or tokenizer is None:
-        tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="/tmp")
-        model = AutoModelForCausalLM.from_pretrained(model_id, cache_dir="/tmp")
+        tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
+
+        base_model = FastLanguageModel.from_pretrained(
+            base_model_name, trust_remote_code=True
+        )
+
+        model = FastLanguageModel.get_peft_model(
+            base_model,
+            r=8,
+            target_modules=[
+                "q_proj", "k_proj", "v_proj", "o_proj",
+                "gate_proj", "up_proj", "down_proj"
+            ],
+            lora_alpha=16,
+            lora_dropout=0,
+            bias="none",
+            use_gradient_checkpointing="unsloth",
+            state_dict=torch.load(os.path.join(lora_model_path, "adapter_model.safetensors"))
+        )
     model.eval()
 
+# -------------------------------
 # Input schema
+# -------------------------------
 class QueryRequest(BaseModel):
     question: str
     max_new_tokens: int = 64
@@ -40,26 +70,33 @@ class QueryRequest(BaseModel):
     top_p: float = 0.9
     reasoning_effort: str = "medium"
 
+# -------------------------------
 # Health check
+# -------------------------------
 @app.get("/")
 def health():
     return {"status": "ok"}
 
-# Main predict endpoint
+# -------------------------------
+# Prediction endpoint
+# -------------------------------
 @app.post("/predict")
 def predict(req: QueryRequest):
     load_model()
+    device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    input_ids = tokenizer(req.question, return_tensors="pt").input_ids
+    inputs = tokenizer.apply_chat_template(
+        [{"role": "user", "content": req.question}],
+        add_generation_prompt=True,
+        return_tensors="pt",
+        return_dict=True,
+        reasoning_effort=req.reasoning_effort
+    ).to(device)
 
     with torch.no_grad():
         output = model.generate(
-            input_ids=input_ids,
-            max_new_tokens=req.max_new_tokens,
-            do_sample=True,
-            temperature=req.temperature,
-            top_p=req.top_p,
-            pad_token_id=tokenizer.eos_token_id,
+            **inputs,
+            max_new_tokens=req.max_new_tokens
         )
 
     answer = tokenizer.decode(output[0], skip_special_tokens=True)
```
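A caveat on the new `load_model()`: `torch.load` cannot read `.safetensors` files (they are not pickle archives), `FastLanguageModel.from_pretrained` returns a `(model, tokenizer)` tuple rather than a bare model, and `get_peft_model` initializes a fresh, untrained adapter rather than loading saved weights. A minimal sketch of a loading path that avoids all three issues, assuming `./finetuned_model` contains the `adapter_config.json` and `adapter_model.safetensors` written by `save_pretrained()`:

```python
import unsloth  # must still be imported before transformers
from unsloth import FastLanguageModel
from peft import PeftModel

model = None
tokenizer = None
base_model_name = "unsloth/gpt-oss-20b"
lora_model_path = "./finetuned_model"   # directory written by save_pretrained()

def load_model():
    global model, tokenizer
    if model is None or tokenizer is None:
        # from_pretrained returns a (model, tokenizer) tuple - unpack both
        base_model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=base_model_name,
        )
        # PeftModel reads adapter_config.json and adapter_model.safetensors
        # itself, so no manual torch.load / state_dict plumbing is needed
        model = PeftModel.from_pretrained(base_model, lora_model_path)
        model.eval()
```

If the adapter directory carries a valid `adapter_config.json`, passing it directly as `model_name` to `FastLanguageModel.from_pretrained` typically resolves and loads the base model plus adapter in a single call.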
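Two smaller caveats in the new `predict()`: `generate` no longer receives `temperature` or `top_p`, so those request fields are silently ignored, and `tokenizer.decode(output[0], ...)` returns the prompt together with the completion. A sketch of the endpoint body with both restored, assuming `inputs` is the dict returned by `apply_chat_template(..., return_dict=True)`:

```python
with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=req.max_new_tokens,
        do_sample=True,              # re-enable sampling so the knobs apply
        temperature=req.temperature,
        top_p=req.top_p,
    )

# Decode only the tokens generated after the prompt
prompt_len = inputs["input_ids"].shape[-1]
answer = tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True)
```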
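For reference, a client call against the deployed endpoint might look like the sketch below; the URL is hypothetical, and the response shape depends on what `predict` returns after the lines shown in the diff:

```python
import requests

# Hypothetical Space URL - substitute your own deployment
url = "https://hello-ram-unsolth-gpt.hf.space/predict"

payload = {
    "question": "Summarize what LoRA fine-tuning does.",
    "max_new_tokens": 64,
    "temperature": 0.7,   # sampling fields defined on QueryRequest
    "top_p": 0.9,
    "reasoning_effort": "medium",
}

resp = requests.post(url, json=payload)
print(resp.status_code, resp.json())
```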