import os
import sys

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Same output dir as train.py (works from any cwd)
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(SCRIPT_DIR, "multilingual-doc-model")

if not os.path.isdir(model_path):
    print(f"Model not found at {model_path}. Run train.py first to train the model.")
    sys.exit(1)

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Use GPU (device 0) if available, else CPU (-1)
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# Spanish prompt: "Explain this document to me: AI improves productivity."
prompt = """User: Explícame este documento: La IA mejora la productividad.
Assistant:"""

result = pipe(prompt, max_new_tokens=120, do_sample=True, temperature=0.7)
print(result[0]["generated_text"])
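
# Optional post-processing: text-generation pipelines echo the prompt, so the
# assistant's reply can be sliced off the front. A minimal sketch using pure
# string handling (no extra dependencies); it assumes the model may ramble
# into a new "User:" turn, which we truncate at if one appears.
generated = result[0]["generated_text"]
reply = generated[len(prompt):].split("User:", 1)[0].strip()
print("Assistant reply only:\n" + reply)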