Update app.py
app.py CHANGED
@@ -127,36 +127,46 @@ def image_generator(prompt: str) -> str:
     return f"Image saved at {output_path}"
 
 # -------------------- Local LLM (Replaces HfApiModel) --------------------
-from transformers import pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import torch
 
 class LocalModel:
     """
     Minimal local model interface compatible with smolagents CodeAgent.
     """
     def __init__(self):
+        model_name = "openlm-research/open_llama_3b"
+
+        # Load tokenizer with use_fast=False to avoid SentencePiece conversion error
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+
+        # Load model with appropriate dtype and device map
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+            device_map="auto" if torch.cuda.is_available() else None,
+        )
+
+        # Create pipeline with explicit tokenizer and model
         self.pipeline = pipeline(
             "text-generation",
-            model=
-
-
+            model=self.model,
+            tokenizer=self.tokenizer,
+            device=0 if torch.cuda.is_available() else -1,
         )
 
     def generate(self, prompt, **kwargs):
         """
         Generate text from a given prompt.
-
         Args:
             prompt (str): Input prompt for generation.
             **kwargs: Additional parameters for the pipeline.
-
         Returns:
             str: Generated text output.
         """
-        result = self.pipeline(prompt, max_new_tokens=500, do_sample=True)
+        result = self.pipeline(prompt, max_new_tokens=500, do_sample=True, **kwargs)
         return result[0]['generated_text']
 
-model = LocalModel()
-
 # -------------------- Agent Setup --------------------
 final_answer = FinalAnswerTool()
 search_tool = DuckDuckGoSearchTool()
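
A note on the new pipeline construction: on a GPU machine the model is loaded with device_map="auto" (so accelerate decides weight placement) while the pipeline is also given device=0. Depending on the transformers version, combining an accelerate-managed model with an explicit device argument can emit a warning or raise a ValueError. The sketch below shows one way to avoid the conflict; it keeps the diff's model name, dtype, and tokenizer choices, and the use_cuda flag is illustrative, not part of the commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "openlm-research/open_llama_3b"
use_cuda = torch.cuda.is_available()

# Slow tokenizer sidesteps the SentencePiece fast-conversion error noted in the diff.
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if use_cuda else torch.float32,
    device_map="auto" if use_cuda else None,  # accelerate manages placement on GPU
)

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    # Pin a device only when accelerate is not managing placement;
    # passing device=0 alongside device_map="auto" may conflict.
    device=None if use_cuda else -1,
)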
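
For a quick sanity check of the committed class, something like the following should work once transformers, torch, sentencepiece, and accelerate are installed. Since the diff drops the module-level model = LocalModel() line, the instantiation is shown explicitly; the prompt and temperature are illustrative, and the first call downloads the model weights.

model = LocalModel()

# **kwargs are forwarded to the pipeline call, so standard generation
# parameters such as temperature pass straight through.
print(model.generate("Q: What is the capital of France?\nA:", temperature=0.7))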