Spaces:
Paused
Paused
Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
|
@@ -1,12 +1,5 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import torch
|
| 3 |
-
|
| 4 |
-
# Check if ZeroGPU is available: the `spaces` package only exists on
# Hugging Face Spaces ZeroGPU hardware; elsewhere the import fails and we
# fall back to plain (non-decorated) execution.
try:
    import spaces  # noqa: F401  -- used later by gpu_decorator
    ZEROGPU = True
except ImportError:  # was a bare `except:`, which would also hide SystemExit/typos
    ZEROGPU = False
|
| 10 |
import json
|
| 11 |
import time
|
| 12 |
import os
|
|
@@ -158,15 +151,6 @@ def load_custom_dataset(file_path):
|
|
| 158 |
|
| 159 |
return data
|
| 160 |
|
| 161 |
-
# Decorator factory: apply spaces.GPU only when running on ZeroGPU hardware.
def gpu_decorator(duration=300):
    """Return a decorator that wraps *func* with ``spaces.GPU(duration=...)``
    when ZeroGPU is available, and leaves *func* untouched otherwise."""
    def _wrap(func):
        # Off ZeroGPU hardware the decorator is a no-op.
        if not ZEROGPU:
            return func
        return spaces.GPU(duration=duration)(func)
    return _wrap
|
| 168 |
-
|
| 169 |
-
@gpu_decorator(duration=300)
|
| 170 |
def train_model(model_name, datasets, max_samples, lora_r, lora_alpha, epochs, output_name, custom_file, continue_training, continue_from, progress=gr.Progress()):
|
| 171 |
"""Train model with LoRA"""
|
| 172 |
|
|
@@ -403,7 +387,6 @@ def train_model(model_name, datasets, max_samples, lora_r, lora_alpha, epochs, o
|
|
| 403 |
import traceback
|
| 404 |
yield log(f"ERROR: {e}\n{traceback.format_exc()}")
|
| 405 |
|
| 406 |
-
@gpu_decorator(duration=60)
|
| 407 |
def test_model(prompt, output_name):
|
| 408 |
"""Test a trained model"""
|
| 409 |
try:
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import torch
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
import json
|
| 4 |
import time
|
| 5 |
import os
|
|
|
|
| 151 |
|
| 152 |
return data
|
| 153 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
def train_model(model_name, datasets, max_samples, lora_r, lora_alpha, epochs, output_name, custom_file, continue_training, continue_from, progress=gr.Progress()):
|
| 155 |
"""Train model with LoRA"""
|
| 156 |
|
|
|
|
| 387 |
import traceback
|
| 388 |
yield log(f"ERROR: {e}\n{traceback.format_exc()}")
|
| 389 |
|
|
|
|
| 390 |
def test_model(prompt, output_name):
|
| 391 |
"""Test a trained model"""
|
| 392 |
try:
|