import os

# Set cache environment variables BEFORE importing transformers
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_METRICS_CACHE"] = "/tmp/hf_cache"

from transformers import pipeline

# Ensure the cache directory exists and is writable
os.makedirs("/tmp/hf_cache", exist_ok=True)

try:
    generator = pipeline(
        "text-generation",
        model="gpt2",
        # cache_dir is not a pipeline() argument; forward it to from_pretrained via model_kwargs
        model_kwargs={"cache_dir": "/tmp/hf_cache"},
    )
except Exception as e:
    generator = None
    print("⚠️ Failed to load model:", e)


def generate_description(country_name: str) -> str:
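    """Generate a short free-text description of the given country with GPT-2."""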
    if not generator:
        return "⚠️ Model not available. Check server logs."

    prompt = f"Tell me about {country_name}."
    try:
        result = generator(prompt, max_length=100, do_sample=True)
        return result[0]["generated_text"].strip()
    except Exception as e:
        return f"⚠️ Error generating description: {str(e)}"