DSDUDEd committed
Commit 81b66ff · verified · 1 Parent(s): 40e0bac

Update app.py

Files changed (1)
  1. app.py +10 -9
app.py CHANGED
@@ -3,9 +3,9 @@ import torch
 from transformers import GPT2Tokenizer, AutoModelForCausalLM
 from peft import PeftModel
 
-# 1️⃣ Load tokenizer (fallback)
+# 1️⃣ Load fallback tokenizer (GPT2)
 tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
-tokenizer.pad_token = tokenizer.eos_token # Important for causal LM
+tokenizer.pad_token = tokenizer.eos_token # Required for causal LM
 
 # 2️⃣ Load base model
 base_model_name = "TRM-coding/PythonCopilot"
@@ -13,18 +13,19 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 base_model = AutoModelForCausalLM.from_pretrained(
     base_model_name,
-    torch_dtype=torch.float16 if device=="cuda" else torch.float32
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32
 ).to(device)
 
-# 3️⃣ Load PEFT/LoRA weights
+# 3️⃣ Resize embeddings to match PEFT checkpoint vocab
+checkpoint_vocab_size = 50257 # From DSUDUDe/funfox PEFT model
+base_model.resize_token_embeddings(checkpoint_vocab_size)
+
+# 4️⃣ Load PEFT/LoRA adapter
 peft_model_name = "DSDUDEd/funfox"
 model = PeftModel.from_pretrained(base_model, peft_model_name)
 model.eval()
 
-# 4️⃣ Resize embeddings to match tokenizer
-model.resize_token_embeddings(len(tokenizer))
-
-# 5️⃣ Text generation function
+# 5️⃣ Define generation function
 def generate_text(prompt, max_tokens=50):
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(
@@ -36,7 +37,7 @@ def generate_text(prompt, max_tokens=50):
     )
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# 6️⃣ Gradio interface
+# 6️⃣ Build Gradio interface
 iface = gr.Interface(
     fn=generate_text,
     inputs=[
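
The functional change in this commit is the load order: the embedding resize now happens on base_model before PeftModel.from_pretrained attaches the DSDUDEd/funfox adapter, so the base weights already match the 50257-token vocabulary the adapter checkpoint expects (per the new comment in the diff). One quick local sanity check is the hypothetical snippet below; it is not part of the committed app.py and assumes the same tokenizer and base_model objects defined above.

# Hypothetical sanity check (not in the commit): confirm the resized base model
# and the GPT-2 tokenizer agree on vocab size before the LoRA adapter is loaded.
tokenizer_vocab = len(tokenizer)                                  # 50257 for stock GPT-2
model_vocab = base_model.get_input_embeddings().weight.shape[0]   # rows in the embedding matrix

assert tokenizer_vocab == model_vocab == 50257, (
    f"vocab mismatch: tokenizer={tokenizer_vocab}, model={model_vocab}"
)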