"""
Quick check that the pipeline works: load base BLOOM from Hub and generate.
Run: pip install -q transformers torch && python verify_model.py
No training required. After training, use test_model.py to check your fine-tuned model.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

# Base multilingual BLOOM checkpoint from the Hugging Face Hub (no fine-tuning).
model_id = "bigscience/bloom-560m"
print("Loading model and tokenizer from Hub...")
tokenizer = AutoTokenizer.from_pretrained(model_id)
# BLOOM's tokenizer has no pad token by default; reuse EOS so generation can pad.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_id)
# pipeline() expects a device index: 0 = first GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# Spanish prompt ("User: Explain this document to me:\nAI improves productivity.\nAssistant:")
# kept in Spanish to exercise BLOOM's multilingual generation.
prompt = "User: Explícame este documento:\nLa IA mejora la productividad.\nAssistant:"
print("Generating...")
# Sample up to 60 new tokens; pass pad_token_id explicitly to avoid the pipeline's padding warning.
out = pipe(prompt, max_new_tokens=60, do_sample=True, temperature=0.7, pad_token_id=tokenizer.pad_token_id)
print("\n--- Full output ---")
print(out[0]["generated_text"])
print("\n--- Model works: pipeline ran and produced text. ---")
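
# A minimal sketch (assumption, not part of this script) of how test_model.py might reuse
# the same pipeline for a fine-tuned checkpoint: only the model path changes, the rest of
# the loading and generation code stays identical. The directory "./bloom-560m-finetuned"
# is a hypothetical training output path.
#
#   model_id = "./bloom-560m-finetuned"
#   tokenizer = AutoTokenizer.from_pretrained(model_id)
#   model = AutoModelForCausalLM.from_pretrained(model_id)
#   pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)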