Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
Settings
|
| 4 |
+
|
| 5 |
+
Hi! Here are some of our recommendations to get the best out of BLACKBOX:
|
| 6 |
+
|
| 7 |
+
Be as clear as possible
|
| 8 |
+
|
| 9 |
+
End the question in what language you want the answer to be, e.g: ‘connect to mongodb in python
|
| 10 |
+
or you can just
|
| 11 |
+
Watch tutorial video
|
| 12 |
+
Here are some suggestions (choose one):
|
| 13 |
+
Write a function that reads data from a json file
|
| 14 |
+
How to delete docs from mongodb in python
|
| 15 |
+
Connect to mongodb in nodejs
|
| 16 |
+
Ask any coding question
|
| 17 |
+
send
|
| 18 |
+
refresh
|
| 19 |
+
Blackbox AI Chat is in beta and Blackbox is not liable for the content generated. By using Blackbox, you acknowledge that you agree to Blackbox's Terms and Privacy Policy
|
| 20 |
+
import openai
|
| 21 |
+
import io
|
| 22 |
+
import os
|
| 23 |
+
import matplotlib.pyplot as plt
|
| 24 |
+
from PIL import Image
|
| 25 |
+
import requests
|
| 26 |
+
from io import BytesIO
|
| 27 |
+
import base64
|
| 28 |
+
from IPython.display import display, clear_output, Image as IPyImage
|
| 29 |
+
import json
|
| 30 |
+
import gradio as gr
|
| 31 |
+
import PIL.Image
|
| 32 |
+
import io
|
| 33 |
+
import numpy as np
|
| 34 |
+
#All abstractions provided by langchain
|
| 35 |
+
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
|
| 36 |
+
from dotenv import load_dotenv
|
| 37 |
+
|
| 38 |
+
# Load environment variables from .env file
# (expects OPENAI_API_KEY and PEBBLELY_TOKEN to be defined there).
load_dotenv()

# Prompt template for the ad-copy generator. The {human_input} slot receives
# the raw product description the user types into the Gradio text box.
# NOTE(review): the text says "4 fields" but only three (primary, headline,
# description) are listed and consumed downstream — likely a leftover typo.
template = """Assistant is a large language model trained by OpenAI.

Assistant is designed to generate catchy texts for Facebook Ads post. Assistant will receive some details about a product such as type, color, price, and potential customers.

Based on these parameters it will generate the following 4 fields -

primary: max 125 characters

headline: max 27 characters

description: max 27 characters

Each field should be totally different with minimal repetition. Do not repeatedly use phrases that sound to sales like. Output your responses as a JSON format with the keys being 'primary', 'headline', and 'description'. Be strict about this format. It will be read as a JSON file. Just directly output responses nothing else, no leading words, etc.

Consider this product : {human_input}
"""

# LangChain prompt wrapper exposing the single variable used in the template.
prompt = PromptTemplate(
    input_variables=["human_input"],
    template=template
)

# Chain used by upload_and_process() to turn product details into ad copy.
# temperature=1 favours varied, creative wording over deterministic output.
chatgpt_chain = LLMChain(
    llm=OpenAI(temperature=1),
    prompt=prompt,
    verbose=True,  # logs the rendered prompt to stdout on every call
)
|
| 68 |
+
|
| 69 |
+
# Helper: the Pebblely API expects images as base64-encoded strings.
def image_to_base64(img_path):
    """Return the contents of the file at *img_path* as a base64 string."""
    with open(img_path, "rb") as fh:
        raw_bytes = fh.read()
    encoded = base64.b64encode(raw_bytes)
    return encoded.decode("utf-8")
|
| 73 |
+
|
| 74 |
+
#Removes backdrop from image using Pebblely API
def removeBackdrop(image_path, output_path):
    """Strip the background from the image at *image_path* and save the result.

    Sends the base64-encoded image to Pebblely's remove-background endpoint
    (authenticated via the PEBBLELY_TOKEN environment variable), decodes the
    base64 image in the response, and writes it to *output_path*.

    Raises:
        requests.HTTPError: if the API responds with an error status code.
        KeyError: if the response JSON lacks the expected "data" field,
            or PEBBLELY_TOKEN is not set in the environment.
    """
    endpoint_url = "https://api.pebblely.com/remove-background/v1/"

    response = requests.post(
        endpoint_url,
        headers={
            "Content-Type": "application/json",
            "X-Pebblely-Access-Token": os.environ["PEBBLELY_TOKEN"]
        },
        json={
            "image": image_to_base64(image_path),
        }
    )
    # Fail fast with a descriptive HTTP error instead of the opaque KeyError
    # the next line would raise on an error response.
    response.raise_for_status()

    # The API returns the processed image as a base64 string under "data".
    # b64decode accepts str directly, so no intermediate encode is needed.
    image_b64 = response.json()["data"]
    image_bytes = io.BytesIO(base64.b64decode(image_b64))
    image = Image.open(image_bytes)
    image.save(output_path)
|
| 94 |
+
|
| 95 |
+
#Takes the images from Pebblely output and generates images with different backdrops based on user prompts
def DallEBackdropGen(image_path, prompt, n_img, size):
    """Ask DALL-E to paint *n_img* new backdrops behind the image.

    Args:
        image_path: path to a PNG (the background-free Pebblely output).
        prompt: text description of the desired backdrop.
        n_img: number of variations to request.
        size: size string accepted by the API, e.g. "512x512".

    Returns:
        The "data" list from the OpenAI response — one entry per generated
        image, each containing a "url" key.
    """
    # Context manager closes the file handle even if the API call raises;
    # the original left the handle open (resource leak).
    with open(image_path, "rb") as image_file:
        response = openai.Image.create_edit(
            image=image_file,
            prompt=prompt,
            n=n_img,
            size=size
        )
    return response['data']
|
| 104 |
+
|
| 105 |
+
#Just outputs the images from DallE in a nice grid for easy consumption
def outputGenImages(ori_image, response_data):
    """Download every generated image and stack them vertically into one image.

    Args:
        ori_image: unused; kept for backward compatibility with callers.
        response_data: list of dicts, each holding a downloadable "url" key.

    Returns:
        A single PIL RGB image with all results stacked top-to-bottom.

    Raises:
        ValueError: if *response_data* is empty.
    """
    # Guard: an empty list would otherwise crash below with an opaque
    # IndexError on images[0].
    if not response_data:
        raise ValueError("response_data is empty: no generated images to display")

    # Download each generated image from its URL.
    image_urls = [item['url'] for item in response_data]
    images = [Image.open(BytesIO(requests.get(url).content)) for url in image_urls]

    # Allocate a canvas tall enough for every image
    # (assumes all images share the size of the first one).
    width, height = images[0].size
    new_im = Image.new('RGB', (width, height * len(images)))

    # Paste each image directly below the previous one.
    for i, img in enumerate(images):
        new_im.paste(img, (0, i * height))

    return new_im
|
| 124 |
+
|
| 125 |
+
def upload_and_process(image, promptText, promptImg, lens_option, n_images, size):
    """Gradio callback: generate ad copy and DALL-E backdrop variations.

    Args:
        image: PIL image uploaded by the user.
        promptText: product details fed to the LLM for ad-copy generation.
        promptImg: description of the desired generated backdrop.
        lens_option: camera/lens descriptor appended to the image prompt.
        n_images: number of DALL-E variations to request.
        size: image size string, e.g. "512x512".

    Returns:
        Tuple (html_text, stitched_image) matching the two Gradio outputs.
    """
    # Persist the upload so the path-based helper functions can read it.
    image_path = "temp.png"
    image.save(image_path)

    # Ask the LLM for ad copy; the prompt template instructs it to reply
    # with pure JSON. NOTE(review): json.loads raises JSONDecodeError if the
    # model strays from that format — surfaces as an error in the UI.
    responses = chatgpt_chain.predict(human_input=promptText)
    print(responses)
    responses = json.loads(responses)

    # Steer DALL-E's rendering style with the chosen lens descriptor.
    dall_e_prompt = promptImg + " , " + lens_option
    primary = responses["primary"]
    headline = responses["headline"]
    description = responses["description"]

    # Strip the background, then generate new backdrops with DALL-E.
    removeBackdrop(image_path, "temp_no_bg.png")
    response_data = DallEBackdropGen("temp_no_bg.png", dall_e_prompt, n_images, size)

    # Stitch the generated images into one tall image for display.
    result_images = outputGenImages(image_path, response_data)

    text_output = f"<h2>{headline}</h2><h3>{primary}</h3><p>{description}</p>"

    return text_output, result_images  # HTML string + PIL image for Gradio
|
| 149 |
+
|
| 150 |
+
# Camera/lens descriptors appended to the DALL-E prompt to steer the
# rendering style (depth of field, angle, exposure look).
lens_options = [
    "Sigma 85 mm f/1.4 (good for portrait)",
    "Sigma 85 mm f/8(sharper background)",
    "Sigma 24 mm f/8 (wider angle)",
    "Nikon D810 | ISO 64 | focal length 20 mm (Voigtländer 20 mm f3.5) | aperture f/9 | exposure time 1/40 Sec (DRI)",
    "Canon EOS 1000D, ƒ/3.5, focal length: 18.0 mm, exposure time: 1/5, ISO 400"
]

# Gradio UI: six inputs mapped positionally onto upload_and_process's
# parameters; outputs are the ad-copy HTML and the stitched image grid.
# NOTE(review): gr.inputs/gr.outputs is the legacy (pre-3.x removal) Gradio
# namespace — confirm the pinned gradio version still supports it.
iface = gr.Interface(
    fn=upload_and_process,
    inputs=[
        gr.inputs.Image(type="pil", label="Upload Image", image_mode='RGB', tool="editor", source="upload"),
        gr.inputs.Textbox(default='Enter product name, price, target customer, etc.', label="Text Prompt"),
        gr.inputs.Textbox(default='Enter the desired image to be generated', label="Image Prompt"),
        gr.inputs.Dropdown(choices=lens_options, label="Lens Option"),
        gr.inputs.Slider(minimum=1, maximum=10, default=4, step=1, label="No. of Images"),
        gr.inputs.Textbox(default='512x512', label="Size"),  # passed verbatim to the OpenAI API
    ],
    outputs=[
        gr.outputs.HTML(label="Generated Text"),
        gr.outputs.Image(type="pil", label="Generated Images"), # adjust the size as per your need
    ],
    title="Facebook Ad Creation",
)

# Start the Gradio server (blocking call).
iface.launch()
|