"""Generate an explanation of a Spanish document with the fine-tuned model.

Loads the model that train.py saved to ./multilingual-doc-model (resolved
relative to this file, so the script works from any cwd) and runs one
text-generation prompt, printing the result to stdout.

Exits with status 1 if the model directory does not exist yet.
"""

import os
import sys

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


def main() -> None:
    """Load the trained model and print one generated explanation."""
    # Same output dir as train.py (works from any cwd).
    script_dir = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(script_dir, "multilingual-doc-model")
    if not os.path.isdir(model_path):
        print(f"Model not found at {model_path}. Run train.py first to train the model.")
        sys.exit(1)  # sys.exit, not the bare exit() site builtin

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(model_path)

    # Use GPU if available, else CPU. HF pipeline device convention:
    # 0 -> cuda:0, -1 -> CPU.
    device = 0 if torch.cuda.is_available() else -1
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

    prompt = """User: Explícame este documento:
La IA mejora la productividad.
Assistant:"""
    # Sampling with moderate temperature for varied but focused output.
    result = pipe(prompt, max_new_tokens=120, do_sample=True, temperature=0.7)
    print(result[0]["generated_text"])


if __name__ == "__main__":
    main()