Update app.py
app.py
CHANGED
@@ -43,7 +43,6 @@ def load_model():
 
     print("Model loaded successfully!")
 
-@spaces.GPU(duration=120)
 def chat_response(message, history, character_name="ηη", max_tokens=512, temperature=0.7, top_p=0.9):
     """Generate chat response using the loaded model"""
     global model, tokenizer
@@ -105,7 +104,6 @@ def chat_response(message, history, character_name="ηη", max_tokens=512, tem
         print(f"Error during generation: {e}")
         return f"Sorry, I encountered an error: {str(e)}"
 
-@spaces.GPU(duration=120)
 def chat_response_streaming(message, history, character_name="ηη", max_tokens=512, temperature=0.7, top_p=0.9):
     """Generate streaming chat response"""
     global model, tokenizer
@@ -253,6 +251,7 @@ def create_interface():
         )
 
         # Event handlers
+        @spaces.GPU(duration=30)
         def respond(message, chat_history, char_name, max_tok, temp, top_p_val, use_streaming):
             if use_streaming:
                 # For streaming response - add user message first