"""Load a FLAN-T5 text2text pipeline, time the load, and run a sample translation."""

import time

from transformers import pipeline


def main() -> None:
    """Load google/flan-t5-base, report load time, and translate one prompt.

    Side effects: downloads/loads the model (network + disk cache on first
    run) and prints progress and the generated text to stdout.
    """
    print("Loading model...")
    # perf_counter() is monotonic and high-resolution — the right clock for
    # measuring an elapsed interval (time.time() can jump with wall-clock
    # adjustments).
    start_time = time.perf_counter()
    pipe = pipeline("text2text-generation", model="google/flan-t5-base")
    elapsed = time.perf_counter() - start_time
    print(f"Model loaded in {elapsed:.2f} seconds.")

    prompt = "Translate to German: Hello, how are you?"
    print(f"Running inference on: '{prompt}'")
    # pipeline returns a list of dicts; the text lives under 'generated_text'.
    result = pipe(prompt)
    print(f"Result: {result[0]['generated_text']}")


# Guard the entry point so importing this module does not trigger a
# multi-hundred-MB model download as an import side effect.
if __name__ == "__main__":
    main()