Nutnell committed
Commit a5f4140 · verified · 1 Parent(s): 0919d6a

Update fine_tune.py

Files changed (1)
  1. fine_tune.py +7 -84
fine_tune.py CHANGED
@@ -9,9 +9,6 @@ from transformers import (
 )
 from peft import LoraConfig, PeftModel
 from trl import SFTTrainer
-from fastapi import FastAPI, UploadFile, File
-from huggingface_hub import upload_folder
-import shutil
 from fastapi import FastAPI
 from pydantic import BaseModel
 import uvicorn
@@ -108,88 +105,14 @@ print("Inference pipeline ready.")
 class GenerateRequest(BaseModel):
     prompt: str
 
-app = FastAPI(
-    title="DirectEd AI Assistant",
-    version="1.0",
-    description="API for fine-tuned DirectEd AI chatbot."
-)
-
-# --- Load Model + Tokenizer ---
-try:
-    tokenizer = AutoTokenizer.from_pretrained(base_model_name)
-
-    model = AutoModelForCausalLM.from_pretrained(
-        base_model_name,
-        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-        device_map="auto"
-    )
+app = FastAPI(title="Fine-tuned LLaMA API")
 
-    if os.path.exists(output_dir):
-        print(f"Loading adapter from {output_dir}")
-        model = PeftModel.from_pretrained(model, output_dir)
-    else:
-        print("⚠️ No adapter folder found, using base model only")
-
-except Exception as e:
-    print("❌ Model load failed:", e)
-    model, tokenizer = None, None
-
-
-# --- Routes ---
 @app.get("/")
-def health():
-    return {"status": "ok", "message": "DirectEd AI Space running."}
-
+def home():
+    return {"status": "ok", "message": "Fine-tuned LLaMA is ready."}
 
 @app.post("/generate")
-def generate(prompt: str, max_new_tokens: int = 200):
-    if model is None or tokenizer is None:
-        return {"error": "Model not loaded."}
-
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(
-        **inputs,
-        max_new_tokens=max_new_tokens,
-        do_sample=True,
-        top_k=50,
-        top_p=0.9
-    )
-    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return {"response": text}
-
-
-@app.get("/list_adapter")
-def list_adapter():
-    """List adapter files in output_dir"""
-    if os.path.exists(output_dir):
-        files = os.listdir(output_dir)
-        return {"adapter_files": files}
-    return {"adapter_files": [], "message": "No adapter directory found."}
-
-
-@app.post("/upload_adapter")
-def upload_adapter(file: UploadFile = File(...)):
-    """Upload adapter files (e.g. adapter_config.json, adapter_model.bin)"""
-    os.makedirs(output_dir, exist_ok=True)
-    save_path = os.path.join(output_dir, file.filename)
-    with open(save_path, "wb") as buffer:
-        shutil.copyfileobj(file.file, buffer)
-    return {"status": "success", "filename": file.filename}
-
-
-@app.post("/push_adapter")
-def push_adapter():
-    """Push adapter folder to Hugging Face Hub"""
-    if not os.path.exists(output_dir):
-        return {"error": "No adapter folder found."}
-
-    files = os.listdir(output_dir)
-    if not files:
-        return {"error": "Adapter folder is empty."}
-
-    upload_folder(
-        repo_id=hub_repo_id,
-        folder_path=output_dir,
-        commit_message="Upload LoRA adapter from Space"
-    )
-    return {"status": "uploaded", "repo": f"https://huggingface.co/{hub_repo_id}", "files": files}
+def generate(request: GenerateRequest):
+    formatted_prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{request.prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
+    outputs = pipe(formatted_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
+    return {"response": outputs[0]["generated_text"]}