Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -3,60 +3,66 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 import gradio as gr
 
-# …
+# Direct base model (no auto-detection needed)
+BASE_MODEL = "mistralai/Mistral-7B-Instruct-v0.2"
 PEFT_ID = "rishu834763/java-explainer-lora"
 
-# Load …
-base_model = "mistralai/Mistral-7B-Instruct-v0.2"
-
+# Load in 8-bit → super stable & fast cold start on free tier
 model = AutoModelForCausalLM.from_pretrained(
-    …
+    BASE_MODEL,
     device_map="auto",
-    load_in_8bit=True,
+    load_in_8bit=True,
     torch_dtype=torch.bfloat16,
 )
 
-# …
+# Load your LoRA on top (no merge = instant)
 model = PeftModel.from_pretrained(model, PEFT_ID)
 
-tokenizer = AutoTokenizer.from_pretrained(
-    …
-tokenizer.pad_token = tokenizer.eos_token
+tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
+tokenizer.pad_token = tokenizer.eos_token
 
-# Proper generation function
 def chat(message, history):
     messages = []
-    for …
-        messages.append({"role": "user", "content": …
-        if …
-            messages.append({"role": "assistant", "content": …
+    for user_msg, bot_msg in history:
+        messages.append({"role": "user", "content": user_msg})
+        if bot_msg:
+            messages.append({"role": "assistant", "content": bot_msg})
     messages.append({"role": "user", "content": message})
 
-    input_ids = tokenizer.apply_chat_template(
-        …
+    input_ids = tokenizer.apply_chat_template(
+        messages,
+        add_generation_prompt=True,
+        return_tensors="pt"
+    ).to(model.device)
+
+    terminators = [
+        tokenizer.eos_token_id,
+        tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    ]
+
     output_ids = model.generate(
         input_ids,
         max_new_tokens=1024,
+        do_sample=True,
         temperature=0.6,
         top_p=0.9,
-        …
-        repetition_penalty=1.1,
-        eos_token_id=tokenizer.eos_token_id,
+        eos_token_id=terminators,
         pad_token_id=tokenizer.eos_token_id,
     )
-
-    response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
+
+    response = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
     return response
 
-# …
+# Minimal interface – starts instantly
 gr.ChatInterface(
     chat,
-    title="Java Explainer …
-    description="Your own fine-tuned model · Starts in seconds · No OpenAI",
+    title="Java Explainer – Your Model",
     examples=[
-        "Explain this Java code simply …
-        "What is the difference between …
-        "Why …
+        "Explain this Java code simply:\npublic class Test {\n public static void main(String[] args) {\n System.out.println(\"Hello\");\n }\n}",
+        "What is the difference between HashMap and Hashtable?",
+        "Why main method is public static void?",
     ],
     cache_examples=False,
-    …
+    submit_btn="Send",
+    retry_btn=None,  # removes the retry that sometimes causes blank replies
+).queue(max_size=20).launch()
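
A note on the new loading block: passing load_in_8bit=True straight to from_pretrained only takes effect when bitsandbytes and accelerate are installed and a GPU runtime is attached, and recent transformers releases prefer routing the flag through BitsAndBytesConfig. Below is a minimal sketch of an equivalent load under those assumptions; the Space's requirements.txt is not part of this commit, so the dependency set here is assumed rather than confirmed.

# Sketch only: equivalent 8-bit load routed through BitsAndBytesConfig.
# Assumes bitsandbytes + accelerate are installed and a GPU is available;
# the Space's requirements.txt is not shown in this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

BASE_MODEL = "mistralai/Mistral-7B-Instruct-v0.2"
PEFT_ID = "rishu834763/java-explainer-lora"

model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    device_map="auto",                                           # needs accelerate
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),   # instead of the bare load_in_8bit=True kwarg
    torch_dtype=torch.bfloat16,
)
model = PeftModel.from_pretrained(model, PEFT_ID)    # LoRA adapter on top, no merge
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
tokenizer.pad_token = tokenizer.eos_token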
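
The terminator list asks the tokenizer for "<|eot_id|>", which is a Llama-3 end-of-turn marker rather than part of the Mistral-7B-Instruct-v0.2 vocabulary, so convert_tokens_to_ids may come back as None (fast tokenizer) or as the unk id. A defensive sketch, assuming only ids that actually resolve should be handed to generate:

# Sketch only: keep terminator ids that actually exist in the loaded tokenizer.
# "<|eot_id|>" is assumed to be absent from the Mistral-Instruct vocabulary.
terminators = [tokenizer.eos_token_id]
for tok in ("</s>", "<|eot_id|>"):
    tok_id = tokenizer.convert_tokens_to_ids(tok)
    if tok_id is not None and tok_id != tokenizer.unk_token_id:
        terminators.append(tok_id)
terminators = list(dict.fromkeys(terminators))  # de-duplicate, keep order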
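
The chat(message, history) loop unpacks each history entry into a user/assistant pair, which matches the tuple-style history that gr.ChatInterface passes in its default mode. A hypothetical local smoke test, assuming the model, tokenizer, and chat defined above are already loaded (the Java question and answer are invented):

# Sketch only: call chat() directly with tuple-style history, the format
# ChatInterface uses by default. Example strings are made up.
history = [
    ["What is a class in Java?", "A class is a blueprint that bundles fields and methods."],
]
print(chat("And how is an interface different?", history))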