Update app.py
app.py CHANGED
@@ -4,11 +4,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 
 # 1. Configuration
+# We point to the official unsloth version of the base model for the tokenizer
 base_model_name = "unsloth/Llama-3.2-3B-bnb-4bit"
 adapter_path = "."
 
-# 2. Load Tokenizer
-tokenizer
+# 2. Load Tokenizer directly from the base model source
+# This fixes the "backend tokenizer" error by getting the files from Hugging Face directly
+tokenizer = AutoTokenizer.from_pretrained(base_model_name)
 
 # 3. Load Model with memory-saving settings for CPU
 model = AutoModelForCausalLM.from_pretrained(
@@ -18,10 +20,10 @@ model = AutoModelForCausalLM.from_pretrained(
     low_cpu_mem_usage=True
 )
 
-# 4. Apply your adapters
+# 4. Apply your fine-tuned adapters
 model = PeftModel.from_pretrained(model, adapter_path)
 
-# 5. Logic
+# 5. The Reasoning Logic
 def legal_summarizer(legal_text):
     prompt = f"Analyze the following legal text and provide a grounded summary.\n\nInput:\n{legal_text}\n\nResponse:\n"
     inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
@@ -35,7 +37,7 @@ def legal_summarizer(legal_text):
         return decoded.split("Response:")[-1].strip()
     return decoded.strip()
 
-# 6. UI
+# 6. Setup the UI
 demo = gr.Interface(
     fn=legal_summarizer,
     inputs=gr.Textbox(lines=10, label="Paste Legal Clause"),
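For context, here is a minimal end-to-end sketch of the pattern this commit lands on: tokenizer pulled straight from the base model repo, base weights loaded with the CPU memory-saving flag, the fine-tuned adapters applied on top via PEFT, and a Gradio Interface around the summarizer. Anything the hunks above do not show — the remaining from_pretrained() arguments, the generate() call, the Interface outputs, and the launch block — is an assumption here, not the Space's actual code.

# Sketch only: reconstructs the overall flow of app.py from the diff above.
# Parts not visible in the hunks (generation settings, output widget, launch)
# are assumptions and may differ from the real Space.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_name = "unsloth/Llama-3.2-3B-bnb-4bit"
adapter_path = "."  # adapters sit next to app.py in the Space repo

# Loading the tokenizer from the base model repo avoids the "backend tokenizer"
# error mentioned in the commit comments.
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    low_cpu_mem_usage=True,  # the only loading kwarg visible in the diff
)
model = PeftModel.from_pretrained(model, adapter_path)

def legal_summarizer(legal_text):
    prompt = (
        "Analyze the following legal text and provide a grounded summary.\n\n"
        f"Input:\n{legal_text}\n\nResponse:\n"
    )
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    # Assumed generation settings; the actual call is not part of the diff.
    output_ids = model.generate(**inputs, max_new_tokens=256)
    decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    if "Response:" in decoded:
        return decoded.split("Response:")[-1].strip()
    return decoded.strip()

demo = gr.Interface(
    fn=legal_summarizer,
    inputs=gr.Textbox(lines=10, label="Paste Legal Clause"),
    outputs=gr.Textbox(label="Summary"),  # assumption: output widget not shown in the diff
)

if __name__ == "__main__":
    demo.launch()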