import os

# Set cache environment variables BEFORE importing transformers so the
# library reads them at import time (it snapshots these on first import).
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"  # legacy alias of HF_HOME, kept for older transformers versions
os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_METRICS_CACHE"] = "/tmp/hf_cache"

from transformers import pipeline

# Ensure the cache directory exists and is writable before the model download.
os.makedirs("/tmp/hf_cache", exist_ok=True)

# Load the model once at module import; on any failure degrade gracefully
# by leaving `generator` as None so callers get a warning string instead
# of an exception. Broad `except` is deliberate here: load failures can be
# network, disk, or library errors, and all should be non-fatal.
try:
    generator = pipeline(
        "text-generation",
        model="gpt2",
        cache_dir="/tmp/hf_cache"
    )
except Exception as e:
    generator = None
    print("⚠️ Failed to load model:", e)


def generate_description(country_name: str) -> str:
    """Generate a short free-text description of *country_name* using GPT-2.

    Never raises: returns a warning string when the model failed to load at
    import time or when generation itself errors out.

    Args:
        country_name: Name of the country to describe (interpolated into
            the prompt verbatim).

    Returns:
        The generated text (stripped), or a "⚠️ ..." warning string.
    """
    # `generator` is the load-failure sentinel set above; compare against
    # None explicitly rather than relying on the pipeline's truthiness.
    if generator is None:
        return "⚠️ Model not available. Check server logs."
    prompt = f"Tell me about {country_name}."
    try:
        # do_sample=True makes output nondeterministic between calls.
        result = generator(prompt, max_length=100, do_sample=True)
        return result[0]["generated_text"].strip()
    except Exception as e:
        return f"⚠️ Error generating description: {str(e)}"