File size: 2,506 Bytes
3af9bb2
8be12ab
3af9bb2
 
7b1766e
 
8be12ab
c23f4d4
ec20299
 
3af9bb2
c23f4d4
 
 
3af9bb2
7b1766e
 
3af9bb2
7b1766e
c23f4d4
 
 
 
 
3af9bb2
 
ec20299
c23f4d4
3af9bb2
7b1766e
3af9bb2
 
 
7b1766e
3af9bb2
 
 
 
 
7b1766e
c23f4d4
7b1766e
c23f4d4
7b1766e
c23f4d4
 
 
33fcfe8
7b1766e
 
 
 
 
 
 
3af9bb2
 
 
c23f4d4
3af9bb2
1ff4547
f9c8cf7
8be12ab
ec20299
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import os
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image  # Importing Pillow for image processing

# Initialize the Flask app
myapp = Flask(__name__)
CORS(myapp)  # Enable CORS for all routes

# Initialize the InferenceClient with your Hugging Face token
# NOTE: os.environ.get returns None when HF_TOKEN is unset; the client is
# still constructed, but authenticated API calls will fail at request time.
HF_TOKEN = os.environ.get("HF_TOKEN")  # Ensure to set your Hugging Face token in the environment
client = InferenceClient(token=HF_TOKEN)

# Thin wrapper around the Hugging Face text-to-image inference API.
def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"):
    """Generate an image for *prompt* with the given *seed* and *model*.

    Returns the image produced by ``client.text_to_image``, or ``None``
    if the inference call raises for any reason (the error is printed,
    not propagated — callers check for ``None``).
    """
    try:
        # Delegate directly to the inference client; no post-processing needed.
        return client.text_to_image(prompt=prompt, seed=seed, model=model)
    except Exception as e:
        # Best-effort contract: swallow the error, report it, signal via None.
        print(f"Error generating image: {str(e)}")
        return None

# Flask route for the API endpoint
@myapp.route('/generate_image', methods=['POST'])
def generate_api():
    """Generate an image from a JSON request and return it as a PNG download.

    Expected JSON body:
        prompt (str, required): text prompt for the model.
        seed (int, optional): generation seed, defaults to 1.
        model (str, optional): Hugging Face model id, defaults to
            'prompthero/openjourney-v4'.

    Returns:
        A PNG file attachment on success, or a JSON error payload with
        status 400 (missing prompt / bad body) or 500 (generation failure).
    """
    # silent=True makes get_json return None instead of raising on a
    # missing or malformed JSON body; fall back to {} so the .get()
    # calls below are safe and an empty body yields the 400 path.
    data = request.get_json(silent=True) or {}

    # Extract required fields from the request
    prompt = data.get('prompt', '')
    seed = data.get('seed', 1)
    model_name = data.get('model', 'prompthero/openjourney-v4')  # Use the provided model name or a default one

    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400

    try:
        # Call the generate_image function with the dynamically provided model name
        image = generate_image(prompt, seed, model_name)

        if image:
            # Serialize the PIL image into an in-memory PNG buffer
            image_bytes = BytesIO()
            image.save(image_bytes, format='PNG')
            image_bytes.seek(0)  # Rewind so send_file reads from the start

            # Send the generated image as an attachment
            return send_file(
                image_bytes,
                mimetype='image/png',
                as_attachment=True,  # Send the file as an attachment
                download_name='generated_image.png'  # The file name for download
            )
        else:
            # generate_image returns None on any inference failure
            return jsonify({"error": "Failed to generate image"}), 500
    except Exception as e:
        print(f"Error in generate_api: {str(e)}")  # Log the error
        return jsonify({"error": str(e)}), 500

# Add this block to make sure your app runs when called
if __name__ == "__main__":
    # Bind on all interfaces; port 7860 is presumably chosen for the
    # deployment platform's convention — TODO confirm.
    myapp.run(host='0.0.0.0', port=7860)  # Run directly if needed for testing