# Hugging Face Space app (the deployed version showed "Runtime error" at startup).
import gradio as gr
from transformers import pipeline

# NOTE: `PipelineException` was removed from this import. It is not exported
# from the top-level `transformers` package in current releases, so importing
# it raises ImportError at startup (the Space's "Runtime error"), and it was
# never used anywhere in this file anyway.

# ---- Configuration: choose a small model for Free CPU Spaces ----
# If you have more resources you can change this to a larger model,
# but small models (gpt2) are reliable on free CPU.
MODEL_NAME = "gpt2"
def make_generator(model_name: str = MODEL_NAME):
    """Build a text-generation pipeline for *model_name*, or None on failure.

    Returns the pipeline on success. On any loading error the exception is
    printed (visible in the Space logs) and None is returned, so the app can
    start up and show a friendly message instead of crashing at import time.
    """
    try:
        # device=-1 pins the pipeline to CPU, which is what Free CPU Spaces
        # provide (no GPU available).
        return pipeline(task="text-generation", model=model_name, device=-1)
    except Exception as e:
        # Log rather than raise: the caller treats None as "model unavailable".
        print(f"[ERROR] Failed to load model {model_name}: {e}")
        return None
# Create the pipeline once at import time; the Space process keeps the module
# loaded, so every request reuses this single instance. May be None if the
# model failed to load — callers must check before use.
generator = make_generator()
| def research(seed: str): | |
| if not seed or not seed.strip(): | |
| return "Please enter a seed keyword." | |
| if generator is None: | |
| return ( | |
| "Model failed to load. Check Space logs or change MODEL_NAME to a small model " | |
| "(e.g., 'gpt2')." | |
| ) | |
| prompt = ( | |
| "Generate 100 SEO keywords for: " | |
| f"{seed.strip()}\n\n" | |
| "Output a clean numbered list or table with: Keyword | Type | Intent | PriorityScore(0-100)\n" | |
| "Keep results compact and easy to paste into a spreadsheet." | |
| ) | |
| try: | |
| # Use max_new_tokens rather than max_length to avoid token-count confusion. | |
| # Keep num_return_sequences=1 to save resources. | |
| out = generator( | |
| prompt, |