Spaces: Running on Zero
"""Generate an image from a text prompt via the Hugging Face Inference API.

Routes the request through the Fal.AI provider using
``huggingface_hub.InferenceClient``. Requires a valid Fal.AI API key
and network access; ``text_to_image`` returns a PIL image object.
"""
from huggingface_hub import InferenceClient

# Initialize client with the Fal.AI provider and your API key (replace below).
client = InferenceClient(
    provider="fal-ai",
    api_key="your_fal_ai_api_key",  # Replace with your actual Fal.AI API key
)

# Text prompt for image generation.
prompt = "Astronaut riding a horse"

# Use a public or your deployed model on Fal.AI.
# Make sure this model is deployed on Fal and accessible.
model_name = "black-forest-labs/FLUX.1-dev"

try:
    # Generate image (remote inference call; may raise on auth/network errors).
    image = client.text_to_image(
        prompt=prompt,
        model=model_name,
    )
    # Display the image (if running in Jupyter/Colab).
    image.show()
except Exception as e:
    # Broad catch is deliberate: this is the script's top-level boundary,
    # and any provider/network/auth failure should be reported, not crash.
    print(f"Error during inference: {e}")