Update app.py
Browse files
app.py
CHANGED
|
@@ -9,10 +9,15 @@ from itertools import islice
|
|
| 9 |
|
| 10 |
login(token=os.environ.get("hf_token"))
|
| 11 |
|
|
|
|
| 12 |
model = AutoModelForCausalLM.from_pretrained("flamiry/first")
|
| 13 |
tokenizer = AutoTokenizer.from_pretrained("flamiry/first")
|
| 14 |
tokenizer.pad_token = tokenizer.eos_token
|
| 15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
@spaces.GPU
|
| 17 |
def train_model(start, end):
|
| 18 |
start = int(start)
|
|
@@ -28,6 +33,9 @@ def train_model(start, end):
|
|
| 28 |
max_length=512
|
| 29 |
)
|
| 30 |
|
|
|
|
|
|
|
|
|
|
| 31 |
optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
|
| 32 |
for epoch in range(2):
|
| 33 |
optimizer.zero_grad()
|
|
@@ -35,7 +43,7 @@ def train_model(start, end):
|
|
| 35 |
loss = outputs.loss
|
| 36 |
loss.backward()
|
| 37 |
optimizer.step()
|
| 38 |
-
|
| 39 |
model.push_to_hub("flamiry/first")
|
| 40 |
tokenizer.push_to_hub("flamiry/first")
|
| 41 |
|
|
@@ -46,14 +54,14 @@ def train_model(start, end):
|
|
@spaces.GPU
def generate_text(prompt):
    """Generate a continuation of *prompt* with the fine-tuned model.

    Returns the decoded text on success, or a user-facing error string
    (never raises) so the Gradio UI always gets something to display.
    """
    try:
        encoded = tokenizer.encode(prompt, return_tensors="pt")
        generated = model.generate(encoded, max_length=50)
        decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
        return decoded
    except Exception as e:
        # Surface the failure in the output textbox instead of crashing the app.
        return f"❌ Error: {str(e)}"
|
| 54 |
|
| 55 |
with gr.Blocks() as demo:
|
| 56 |
-
gr.Markdown("#Slovak LLM Training")
|
| 57 |
|
| 58 |
with gr.Tab("Train Model"):
|
| 59 |
gr.Markdown("Click to train the model on Slovak data")
|
|
@@ -61,7 +69,7 @@ with gr.Blocks() as demo:
|
|
| 61 |
end_input = gr.Textbox(label="End")
|
| 62 |
train_btn = gr.Button("Start Training", variant="primary")
|
| 63 |
train_output = gr.Textbox(label="Result", interactive=False)
|
| 64 |
-
train_btn.click(train_model, inputs=[start_input, end_input]
|
| 65 |
|
| 66 |
with gr.Tab("Generate Text"):
|
| 67 |
gr.Markdown("Generate Slovak text")
|
|
|
|
# --- Hub auth + model setup -------------------------------------------------
# Authenticate against the Hugging Face Hub; token comes from the Space's
# "hf_token" secret (may be None for anonymous/public access).
login(token=os.environ.get("hf_token"))

# Load the fine-tuned causal LM and its tokenizer from the Hub.
model = AutoModelForCausalLM.from_pretrained("flamiry/first")
tokenizer = AutoTokenizer.from_pretrained("flamiry/first")
# Causal LMs like GPT-2 ship without a pad token; reuse EOS so batched
# tokenization with padding works.
tokenizer.pad_token = tokenizer.eos_token

# Put the model on the GPU when one is available, otherwise stay on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
| 21 |
@spaces.GPU
|
| 22 |
def train_model(start, end):
|
| 23 |
start = int(start)
|
|
|
|
| 33 |
max_length=512
|
| 34 |
)
|
| 35 |
|
| 36 |
+
# Move inputs to device
|
| 37 |
+
inputs = {k: v.to(device) for k, v in inputs.items()}
|
| 38 |
+
|
| 39 |
optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
|
| 40 |
for epoch in range(2):
|
| 41 |
optimizer.zero_grad()
|
|
|
|
| 43 |
loss = outputs.loss
|
| 44 |
loss.backward()
|
| 45 |
optimizer.step()
|
| 46 |
+
|
| 47 |
model.push_to_hub("flamiry/first")
|
| 48 |
tokenizer.push_to_hub("flamiry/first")
|
| 49 |
|
|
|
|
@spaces.GPU
def generate_text(prompt):
    """Generate a continuation of *prompt* with the fine-tuned model.

    The encoded prompt is moved to the same device as the model before
    generation. Returns the decoded text on success, or a user-facing error
    string (never raises) so the Gradio UI always gets something to display.
    """
    try:
        encoded = tokenizer.encode(prompt, return_tensors="pt").to(device)
        generated = model.generate(encoded, max_length=50)
        decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
        return decoded
    except Exception as e:
        # Surface the failure in the output textbox instead of crashing the app.
        return f"❌ Error: {str(e)}"
|
| 62 |
|
| 63 |
with gr.Blocks() as demo:
|
| 64 |
+
gr.Markdown("# Slovak LLM Training")
|
| 65 |
|
| 66 |
with gr.Tab("Train Model"):
|
| 67 |
gr.Markdown("Click to train the model on Slovak data")
|
|
|
|
| 69 |
end_input = gr.Textbox(label="End")
|
| 70 |
train_btn = gr.Button("Start Training", variant="primary")
|
| 71 |
train_output = gr.Textbox(label="Result", interactive=False)
|
| 72 |
+
train_btn.click(train_model, inputs=[start_input, end_input], outputs=train_output)
|
| 73 |
|
| 74 |
with gr.Tab("Generate Text"):
|
| 75 |
gr.Markdown("Generate Slovak text")
|