Update RockPaperScissor/services/LLM_service.py
RockPaperScissor/services/LLM_service.py (CHANGED)
@@ -36,8 +36,8 @@ def generate_text(prompt_text: str, max_tokens: int = 150):
     try:
         _model = AutoModelForCausalLM.from_pretrained(
             _model_name,
-            torch_dtype=torch.float32,
-            device_map="cpu",
+            torch_dtype=torch.float32,
+            device_map="cpu",
             trust_remote_code=True,
             low_cpu_mem_usage=True
         )
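The flagged lines read identically once the page's whitespace is lost, so the change here appears to be indentation-only; either way, these two arguments pin the model to the CPU in full precision. A minimal sketch of such a load with transformers, using a placeholder model name (the Space's actual _model_name is not shown in this diff):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder; stands in for the Space's _model_name

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,   # full precision, the safe default on CPU
    device_map="cpu",            # keep every weight on the CPU (requires accelerate)
    trust_remote_code=True,
    low_cpu_mem_usage=True,      # load weights incrementally to cap peak RAM
)

float32 avoids the half-precision kernels that are slow or unsupported on many CPUs, which is presumably why the commit keeps these two lines on the CPU path.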
@@ -193,7 +193,7 @@ Your advice:
 >>>"""
         print("[LLMService] Using general strategy prompt")

-        # Use the text generation function
+        # Use the text generation function
         print("[LLMService] Calling text generation method...")
         full_response = self.generate_with_model(analysis_prompt, max_tokens=100)

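generate_with_model itself is not part of this diff; a hedged sketch of what a helper with that signature typically does, shown here as a free function rather than the class method, reusing the tokenizer and model above (names and sampling settings are assumptions):

def generate_with_model(prompt: str, max_tokens: int = 100) -> str:
    # Tokenize, generate, and return only the newly produced text.
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)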
@@ -226,8 +226,6 @@ Your advice:
         try:
             if _model is not None:
                 del _model
-                _model = None
-
             print("[LLMService] Cleanup completed successfully.")
         except Exception as e:
             print(f"[LLMService] Error during cleanup: {e}")
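The last hunk drops the reassignment that followed del _model. As a hedged sketch, the usual way to release a module-level model on CPU is to clear the reference and let the collector reclaim the tensors; the gc call here is an assumption, not something shown in the commit:

import gc

_model = None  # module-level handle, as in LLM_service.py

def cleanup() -> None:
    global _model
    try:
        if _model is not None:
            _model = None   # drop the only strong reference to the weights
            gc.collect()    # assumption: force prompt reclamation of the tensors
        print("[LLMService] Cleanup completed successfully.")
    except Exception as e:
        print(f"[LLMService] Error during cleanup: {e}")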