import openai
import io
import os
import matplotlib.pyplot as plt
from PIL import Image
import requests
from io import BytesIO
import base64
from IPython.display import display, clear_output, Image as IPyImage
import json
import gradio as gr
import PIL.Image
import io
import numpy as np
#All abstractions provided by langchain
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from dotenv import load_dotenv
import sys
import os
from io import StringIO
import re
# Load environment variables from .env file (e.g. the OpenAI API key that
# the OpenAI client below picks up from the environment).
load_dotenv()
# Prompt instructing the model to emit three ad-copy fields
# ("primary", "headline", "description"), one "key: value" per line, with
# per-field length caps.  The body of this string is the runtime prompt —
# upload_and_process() parses responses assuming exactly this format.
template = """Assistant is a large language model trained by OpenAI.
Assistant is designed to generate catchy texts for Facebook Ads post. Assistant will receive some details about a product such as type, color, price, and potential customers.
Based on these parameters it will generate the following 4 fields -
primary: max 125 characters
headline: max 27 characters
description: max 27 characters
Each field should be totally different with minimal repetition. Do not repeatedly use phrases that sound to sales like. Output your responses as a string with the keys being 'primary', 'headline', and 'description' and getting a new Line after each key. Be strict about this format. Just directly output responses nothing else, no leading words, etc.
Consider this product : {human_input}
"""
# Wrap the template so LangChain substitutes {human_input} with the product
# details supplied by the Gradio callback.
prompt = PromptTemplate(
input_variables=["human_input"],
template=template
)
# LLM chain used by upload_and_process(); temperature=1 favours varied copy,
# verbose=True logs the fully-rendered prompt on every call.
chatgpt_chain = LLMChain(
llm=OpenAI(temperature=1),
prompt=prompt,
verbose=True,
)
#Helper function as this is what Pebblely API takes in
# def image_to_base64(img_path):
# with open(img_path, "rb") as image_file:
# return base64.b64encode(image_file.read()).decode('utf-8')
# #Removes backdrop from image using Pebblely API
# def removeBackdrop(image_path, output_path):
# endpoint_url = "https://api.pebblely.com/remove-background/v1/"
# response = requests.post(
# endpoint_url,
# headers={
# "Content-Type": "application/json",
# "X-Pebblely-Access-Token": os.environ["PEBBLELY_TOKEN"]
# },
# json={
# "image": image_to_base64(image_path),
# }
# )
# print(response)
# image_b64 = response.json()["data"]
# image_encoded = image_b64.encode("utf-8")
# image_bytes = io.BytesIO(base64.b64decode(image_encoded))
# image = Image.open(image_bytes)
# image.save(output_path)
#Takes the images from Pebblely output and generates images with different backdrops based on user prompts
# def DallEBackdropGen(image_path, prompt, n_img, size):
# response = openai.Image.create_edit(
# image=open(image_path, "rb"),
# prompt=prompt,
# n=n_img,
# size=size
# )
# print(type(response))
# print(response)
# return response['data']
#Just outputs the images from DallE in a nice grid for easy consumption
# def outputGenImages(ori_image, response_data):
# nrows = len(response_data)
# # List of image URLs
# image_urls = [response_data[i]['url'] for i in range(nrows)]
# # Load all the images into a list
# images = [Image.open(BytesIO(requests.get(url).content)) for url in image_urls]
# # Create a new image of the right size
# width, height = images[0].size # assumes all images are the same size
# total_height = height * nrows
# new_im = Image.new('RGB', (width, total_height))
# # Paste in each image
# for i, img in enumerate(images):
# new_im.paste(img, (0, i * height))
# return new_im
def _parse_ad_fields(responses):
    """Parse the LLM response into a ``{field_name: text}`` dict.

    The prompt asks the model for lines of the form ``key: value`` with keys
    'primary', 'headline' and 'description'.  Tokens are scanned one at a
    time: a token containing ':' starts a new field, every other token is
    appended to the current field's value.

    Fixes vs. the original parser:
    - split on ANY whitespace (``str.split()``), not a single space — the
      old ``split(" ")`` glued a value's last word onto the next line's
      "key:" token whenever fields were newline-separated (the exact format
      the prompt requests), corrupting the keys;
    - keys are normalized (surrounding quotes/whitespace stripped,
      lowercased) so 'Primary', "primary" and primary all match.

    :param responses: raw text returned by the LLM chain.
    :return: dict mapping normalized field names to their text.
    """
    # Strip a literal "Output:" the model sometimes prepends.
    marker = responses.find("Output:")
    if marker != -1:
        responses = responses[:marker] + responses[marker + len("Output:"):]

    fields = {}
    key = None
    value = ""
    for token in responses.split():
        if ":" in token:
            # Flush the field collected so far before starting a new one.
            if key is not None:
                fields[key] = value.strip()
            key, value = token.split(":", 1)
            key = key.strip().strip("'\"").lower()
        else:
            value += " " + token.strip()
    # Flush the final field.
    if key is not None:
        fields[key] = value.strip()
    return fields


def upload_and_process(image, promptText, promptImg, lens_option, n_images, size):
    """Gradio callback: generate Facebook-ad copy for a product description.

    Sends ``promptText`` through the module-level ``chatgpt_chain``, parses
    the three expected fields out of the reply, and returns them rendered as
    a small HTML snippet.

    :param image: uploaded product image (PIL) — currently unused; the
        Pebblely/DALL-E image pipeline is commented out above.
    :param promptText: product details fed to the LLM prompt.
    :param promptImg: desired backdrop prompt — currently unused.
    :param lens_option: lens preset string — currently unused.
    :param n_images: number of images to generate — currently unused.
    :param size: output image size — currently unused.
    :return: HTML string ``<h2>headline</h2><h3>primary</h3><p>description</p>``.
    """
    responses = chatgpt_chain.predict(human_input=promptText)
    print(responses)  # raw model output, kept for debugging

    fields = _parse_ad_fields(responses)
    # Print each parsed field on its own line (debug aid, as before).
    for key, value in fields.items():
        print(key + ": " + value)

    # BUG FIX: the original if/elif chain inspected only the FIRST parsed
    # pair, so at most one of the three fields was ever populated (headline
    # and description stayed empty whenever 'primary' came first).  Look up
    # each field independently instead; missing fields fall back to "".
    primary = fields.get("primary", "")
    headline = fields.get("headline", "")
    description = fields.get("description", "")

    text_output = f"<h2>{headline}</h2><h3>{primary}</h3><p>{description}</p>"
    return text_output  # , result_images — image outputs disabled for now
# Camera / lens presets offered in the UI dropdown.  The selected string was
# intended to be appended to the DALL-E backdrop prompt (that pipeline is
# currently commented out, so the choice is presently unused).
lens_options = [
"Sigma 85 mm f/1.4 (good for portrait)",
"Sigma 85 mm f/8(sharper background)",
"Sigma 24 mm f/8 (wider angle)",
"Nikon D810 | ISO 64 | focal length 20 mm (Voigtländer 20 mm f3.5) | aperture f/9 | exposure time 1/40 Sec (DRI)",
"Canon EOS 1000D, ƒ/3.5, focal length: 18.0 mm, exposure time: 1/5, ISO 400"
]
# Gradio UI wiring: one callback (upload_and_process) with six inputs and a
# single HTML output.
# NOTE(review): gr.inputs / gr.outputs is the legacy (pre-3.x) Gradio API —
# confirm the pinned gradio version before migrating to gr.Image/gr.Textbox.
iface = gr.Interface(
fn=upload_and_process,
inputs=[
gr.inputs.Image(type="pil", label="Upload Image", image_mode='RGB', tool="editor", source="upload"),
gr.inputs.Textbox(default='Enter product name, price, target customer, etc.', label="Text Prompt"),
gr.inputs.Textbox(default='Enter the desired image to be generated', label="Image Prompt"),
gr.inputs.Dropdown(choices=lens_options, label="Lens Option"),
gr.inputs.Slider(minimum=1, maximum=10, default=4, step=1, label="No. of Images"),
gr.inputs.Textbox(default='512x512', label="Size"),  # unused while image generation is disabled
],
outputs=[
gr.outputs.HTML(label="Generated Text"),
# gr.outputs.Image(type="pil", label="Generated Images"), # adjust the size as per your need
],
title="Facebook Ad Creation",
)
iface.launch()