Spaces:
Sleeping
Sleeping
kpkishankrishna committed on
Commit ·
0bef56f
1
Parent(s): 373df8e
added font
Browse files
app.py
CHANGED
|
@@ -10,10 +10,14 @@ import cv2
|
|
| 10 |
import numpy as np
|
| 11 |
|
| 12 |
|
| 13 |
-
def predict(action,image, text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 14 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 15 |
-
def titleonImage(action,image, text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 16 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
inp = text_in_image
|
| 19 |
string = ""
|
|
@@ -30,7 +34,7 @@ def predict(action,image, text_in_image, words_line, model, url_or_text, prompt,
|
|
| 30 |
for i in text_in_image.split("\n"):
|
| 31 |
image = ps.putBText(image, i, text_offset_x = int(x_cor),
|
| 32 |
text_offset_y = int(y_cor)+y_gap, vspace = 5,
|
| 33 |
-
hspace = 5, font_scale = int(font_size),
|
| 34 |
background_RGB = ast.literal_eval(backgound_RGB),
|
| 35 |
text_RGB = ast.literal_eval(text_RGB), thickness = int(thickness),
|
| 36 |
alpha = float(opacity))
|
|
@@ -44,7 +48,7 @@ def predict(action,image, text_in_image, words_line, model, url_or_text, prompt,
|
|
| 44 |
article.parse()
|
| 45 |
text = article.text
|
| 46 |
return text
|
| 47 |
-
def getTitle(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 48 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 49 |
response = openai.Completion.create(
|
| 50 |
model=model,
|
|
@@ -57,7 +61,7 @@ def predict(action,image, text_in_image, words_line, model, url_or_text, prompt,
|
|
| 57 |
).choices[0].text
|
| 58 |
return response
|
| 59 |
|
| 60 |
-
def getSummary(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 61 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 62 |
summary = openai.Completion.create(
|
| 63 |
model=model,
|
|
@@ -73,35 +77,35 @@ def predict(action,image, text_in_image, words_line, model, url_or_text, prompt,
|
|
| 73 |
# text_in_image = "title was not asked" --> only need in summary
|
| 74 |
summary = "summary was not asked"
|
| 75 |
if action == "title on Image":
|
| 76 |
-
image_final = titleonImage(action,image, text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 77 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 78 |
|
| 79 |
if action == "title on image from url/text":
|
| 80 |
url_or_text = getText(url_or_text)
|
| 81 |
-
text_in_image = getTitle(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 82 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 83 |
|
| 84 |
-
image_final = titleonImage(action,image, text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 85 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 86 |
|
| 87 |
if action == "title on image and summary from url/text":
|
| 88 |
url_or_text = getText(url_or_text)
|
| 89 |
-
text_in_image = getTitle(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 90 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 91 |
-
summary = getSummary(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 92 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 93 |
-
image_final = titleonImage(action,image, text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 94 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 95 |
|
| 96 |
if action == "title from url/text":
|
| 97 |
url_or_text = getText(url_or_text)
|
| 98 |
-
text_in_image = getTitle(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 99 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 100 |
|
| 101 |
if action == "summary from url/text":
|
| 102 |
url_or_text = getText(url_or_text)
|
| 103 |
text_in_image = "title was not asked"
|
| 104 |
-
summary = getSummary(text_in_image, words_line, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 105 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 106 |
return image_final, text_in_image, summary
|
| 107 |
|
|
@@ -115,6 +119,11 @@ intr = gr.Interface(predict,
|
|
| 115 |
"image",
|
| 116 |
gr.Textbox(value="Text was not given", label = "Text you want to see in the image"),
|
| 117 |
gr.Number(value=5, label = "Number of words in a line"),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
gr.Dropdown(["text-curie-001", "text-davinci-003"], value = "text-curie-001", label = "GPT3- Model"),
|
| 119 |
gr.Textbox(value="https://www.datasciencecentral.com/will-chatgpt-make-fraud-easier/", label = "Provide blog url or directly input the text for better results"),
|
| 120 |
gr.Textbox(value="Title for instagram post based on above article", label= "Prompt for generating the title"),
|
|
|
|
| 10 |
import numpy as np
|
| 11 |
|
| 12 |
|
| 13 |
+
def predict(action,image, text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 14 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 15 |
+
def titleonImage(action,image, text_in_image, words_line,font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 16 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 17 |
+
font_dict = {'normal size sans-serif':cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 'small size sans-serif':cv2.FONT_HERSHEY_PLAIN,
|
| 18 |
+
'normal size sans-serif (complexity +1)': cv2.FONT_HERSHEY_DUPLEX, 'normal size sans-serif (complexity +2)': cv2.FONT_HERSHEY_COMPLEX ,
|
| 19 |
+
'normal size sans-serif (complexity +3)': cv2.FONT_HERSHEY_TRIPLEX, 'small size sans-serif (complexity +2)': cv2.FONT_HERSHEY_COMPLEX_SMALL,
|
| 20 |
+
'hand-writing style font': cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 'hand-writing style font (complexity +1)': cv2.FONT_HERSHEY_SCRIPT_COMPLEX }
|
| 21 |
|
| 22 |
inp = text_in_image
|
| 23 |
string = ""
|
|
|
|
| 34 |
for i in text_in_image.split("\n"):
|
| 35 |
image = ps.putBText(image, i, text_offset_x = int(x_cor),
|
| 36 |
text_offset_y = int(y_cor)+y_gap, vspace = 5,
|
| 37 |
+
hspace = 5, font_scale = int(font_size), font = font_dict[font],
|
| 38 |
background_RGB = ast.literal_eval(backgound_RGB),
|
| 39 |
text_RGB = ast.literal_eval(text_RGB), thickness = int(thickness),
|
| 40 |
alpha = float(opacity))
|
|
|
|
| 48 |
article.parse()
|
| 49 |
text = article.text
|
| 50 |
return text
|
| 51 |
+
def getTitle(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 52 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 53 |
response = openai.Completion.create(
|
| 54 |
model=model,
|
|
|
|
| 61 |
).choices[0].text
|
| 62 |
return response
|
| 63 |
|
| 64 |
+
def getSummary(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 65 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space):
|
| 66 |
summary = openai.Completion.create(
|
| 67 |
model=model,
|
|
|
|
| 77 |
# text_in_image = "title was not asked" --> only need in summary
|
| 78 |
summary = "summary was not asked"
|
| 79 |
if action == "title on Image":
|
| 80 |
+
image_final = titleonImage(action,image, text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 81 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 82 |
|
| 83 |
if action == "title on image from url/text":
|
| 84 |
url_or_text = getText(url_or_text)
|
| 85 |
+
text_in_image = getTitle(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 86 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 87 |
|
| 88 |
+
image_final = titleonImage(action,image, text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 89 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 90 |
|
| 91 |
if action == "title on image and summary from url/text":
|
| 92 |
url_or_text = getText(url_or_text)
|
| 93 |
+
text_in_image = getTitle(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 94 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 95 |
+
summary = getSummary(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 96 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 97 |
+
image_final = titleonImage(action,image, text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 98 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 99 |
|
| 100 |
if action == "title from url/text":
|
| 101 |
url_or_text = getText(url_or_text)
|
| 102 |
+
text_in_image = getTitle(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 103 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 104 |
|
| 105 |
if action == "summary from url/text":
|
| 106 |
url_or_text = getText(url_or_text)
|
| 107 |
text_in_image = "title was not asked"
|
| 108 |
+
summary = getSummary(text_in_image, words_line, font, model, url_or_text, prompt, summary_prompt, temperature, max_tokens, x_cor, y_cor,
|
| 109 |
font_size, backgound_RGB, text_RGB, thickness, opacity, line_space)
|
| 110 |
return image_final, text_in_image, summary
|
| 111 |
|
|
|
|
| 119 |
"image",
|
| 120 |
gr.Textbox(value="Text was not given", label = "Text you want to see in the image"),
|
| 121 |
gr.Number(value=5, label = "Number of words in a line"),
|
| 122 |
+
gr.Dropdown(['normal size sans-serif', 'small size sans-serif',
|
| 123 |
+
'normal size sans-serif (complexity +1)', 'normal size sans-serif (complexity +2)',
|
| 124 |
+
'normal size sans-serif (complexity +3)', 'small size sans-serif (complexity +2)',
|
| 125 |
+
'hand-writing style font', 'hand-writing style font (complexity +1)'],
|
| 126 |
+
label = "Font for the text", value = 'normal size sans-serif'),
|
| 127 |
gr.Dropdown(["text-curie-001", "text-davinci-003"], value = "text-curie-001", label = "GPT3- Model"),
|
| 128 |
gr.Textbox(value="https://www.datasciencecentral.com/will-chatgpt-make-fraud-easier/", label = "Provide blog url or directly input the text for better results"),
|
| 129 |
gr.Textbox(value="Title for instagram post based on above article", label= "Prompt for generating the title"),
|