import torch
from diffusers import StableDiffusionPipeline
from torch import autocast
import numpy as np
import gradio as gr
import openai
import os
# Load the Stable Diffusion v1.4 pipeline. The Hugging Face auth token is read
# from the 'hugging-token' environment variable (None if unset — presumably the
# model is then fetched anonymously; TODO confirm).
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=os.environ.get('hugging-token'))
# Inference runs on CPU — no GPU assumed in this Space.
device="cpu"
# OpenAI credentials; raises KeyError at startup if 'openai-key' is not set.
openai.api_key = os.environ['openai-key']
pipe = pipe.to(device)
def generateStory(theme1, theme2):
    """Generate the opening paragraph of a story blending two themes.

    Calls GPT-3 (text-davinci-002) to write the paragraph, then runs
    OpenAI's content-filter model over the result. A "2" (unsafe) label is
    only honored when the filter's log-probability for "2" clears the
    documented confidence threshold; a low-confidence "2" is discarded as
    a false positive, per OpenAI's content-filter procedure.

    Args:
        theme1: first theme to integrate into the story.
        theme2: second theme to integrate into the story.

    Returns:
        str: the generated paragraph with the leading blank line removed,
        or 'Please generate again' when the content filter flags it.
    """
    prompt_text = "Write the first paragraph of a story integrates the themes \"{}\" and \"{}\" in a creative way in the style of Kurt Vonnegut.\n\nFirst paragraph of story:\n\n".format(theme1, theme2)
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=prompt_text,
        temperature=0.7,
        max_tokens=250,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    story = response["choices"][0]["text"]

    # Classify the generated text with OpenAI's content filter.
    filter_response = openai.Completion.create(
        model="content-filter-alpha",
        prompt="<|endoftext|>" + story + "\n--\nLabel:",
        temperature=0,
        max_tokens=1,
        top_p=0,
        logprobs=10
    )
    output_label = filter_response["choices"][0]["text"]

    # This is the probability at which we evaluate that a "2" is likely real
    # vs. should be discarded as a false positive (value from OpenAI's guide).
    toxic_threshold = -0.355
    if output_label == "2":
        # Bug fix: the threshold was previously declared but never applied.
        # A "2" whose logprob is below the threshold is a false positive.
        top_logprobs = filter_response["choices"][0]["logprobs"]["top_logprobs"][0]
        if top_logprobs.get("2", 0) < toxic_threshold:
            output_label = "0"
    if output_label == "2":
        story = 'Please generate again'

    # Completions typically begin with a blank line; strip it for display.
    if story.startswith('\n\n'):
        story = story[2:]
    return story
def illustratedStory(story):
    """Produce an illustration image for a story.

    For a real story, GPT-3 condenses it into an illustration caption
    (guided by a one-shot example in the prompt), which Stable Diffusion
    then renders. For the sentinel 'Please generate again' (a failed
    generation) a plain white placeholder image is returned instead.

    Args:
        story: the story text, or the sentinel 'Please generate again'.

    Returns:
        A PIL image from the diffusion pipeline, or a 100x100x3 white
        numpy uint8 array for the sentinel case.
    """
    if story != 'Please generate again':
        illustration_response = openai.Completion.create(
            model="text-davinci-002",
            prompt="Transform the following story into a caption of an accompanying illustration. Start with 'Beautiful digital illustration of':\n\nStory:\n\nI stand at the edge of the Blue Mountains and gaze out at the vastness before me. It's a beautiful day, and the sun is shining. I can see for miles and miles, and it feels like I'm standing at the edge of the world. I'm here with the person I love, and we're about to embark on a great adventure. I can't wait to explore every inch of this place with them.\n\nIllustration caption:\n\nBeautiful digital illustration of two people standing by the edge of a mountain holding hands looking out\n\nStory:\n\n{}\n\nIllustration caption:".format(story),
            temperature=0.7,
            max_tokens=256,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        image_prompt = illustration_response["choices"][0]["text"]
        # Fix: removed an unused torch.Generator('cpu') that was created but
        # never passed to the pipeline.
        image = pipe(image_prompt).images[0]  # PIL image
    else:
        # White placeholder so the UI still displays something.
        image = np.zeros([100, 100, 3], dtype=np.uint8)
        image.fill(255)
    return image
'''
demo = gr.Interface(
fn=themes,
inputs=["text", "text"],
outputs=["text", "image"],
)
demo.launch()
'''
def continueStory(inputStory):
    """Extend an existing story with a GPT-3 continuation.

    The current (possibly user-edited) story is fed back to GPT-3
    (text-davinci-002) as the prompt. The continuation is screened with
    OpenAI's content-filter model; a confident "2" (unsafe) label replaces
    the continuation with a retry message, while a low-confidence "2" is
    discarded as a false positive per OpenAI's documented procedure.

    Args:
        inputStory: the story so far.

    Returns:
        str: the input story with the continuation appended (or with
        'Please generate again' appended when the filter flags it).
    """
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=inputStory,
        temperature=0.7,
        max_tokens=250,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    story = response["choices"][0]["text"]

    # Classify the continuation with OpenAI's content filter.
    filter_response = openai.Completion.create(
        model="content-filter-alpha",
        prompt="<|endoftext|>" + story + "\n--\nLabel:",
        temperature=0,
        max_tokens=1,
        top_p=0,
        logprobs=10
    )
    output_label = filter_response["choices"][0]["text"]

    # This is the probability at which we evaluate that a "2" is likely real
    # vs. should be discarded as a false positive (value from OpenAI's guide).
    toxic_threshold = -0.355
    if output_label == "2":
        # Bug fix: the threshold was previously declared but never applied.
        top_logprobs = filter_response["choices"][0]["logprobs"]["top_logprobs"][0]
        if top_logprobs.get("2", 0) < toxic_threshold:
            output_label = "0"
    if output_label == "2":
        story = 'Please generate again'

    # Completions typically begin with a blank line; strip it for display.
    if story.startswith('\n\n'):
        story = story[2:]
    return inputStory + story
'''
demo = gr.Interface(
fn=themes,
inputs=["text", "text"],
outputs=["text", "image"],
)
demo.launch()
'''
# ---------------------------------------------------------------------------
# Gradio UI: two theme inputs generate a starting paragraph; buttons let the
# user continue the (editable) story or illustrate it with Stable Diffusion.
# ---------------------------------------------------------------------------
with gr.Blocks(css='''
.h1 {
font-family: HK Grotesk;
font-style: normal;
font-weight: bold;
font-size: 100px;
line-height: 105%;
margin: 0;
}
''') as demo:
    # Page header.
    title = gr.HTML(
        """
        <div style="text-align: center; margin: 0;">
        <div style="
        display: inline-flex;
        align-items: center;
        gap: 0.8rem;
        font-size: 1.75rem;
        ">
        <h1 style="font-weight: 900; margin-bottom: 7px;">
        Illustrated Narrative Device
        </h1>
        </div>
        <p style="margin-bottom: 10px; font-size: 94%;">
        A playful AI co-writer!
        </p>
        <br>
        <p style="font-size: 70%;">Generate the beginning of a story by writing two themes, then edit, add to it, extend it and illustrate it! </p>
        </div>
        """)
    # Theme inputs and generation trigger.
    with gr.Row():
        theme1 = gr.Textbox(label='Theme 1', elem_id = 'theme')
        theme2 = gr.Textbox(label='Theme 2', elem_id = 'theme')
    b1 = gr.Button("Generate starting paragraph", elem_id="generate-btn")
    # The story box is editable: users can revise before continuing/illustrating.
    story_output = gr.Textbox(label='Story (pro tip: you can edit this!)')
    with gr.Row():
        b3 = gr.Button("Continue Story", elem_id="continue-btn")
        b2 = gr.Button("Illustrate Story", elem_id="illustrated-btn")
    gr.HTML('<p>Illustrations can take up to 10 minutes to generate. See it as an exercise in patience, amidst a sea of immediacy!</p>')
    with gr.Row():
        illustration = gr.Image(label='Illustration')
    gr.HTML('<div style="text-align: center; max-width: 650px; margin: 0 auto;"><p style="margin-bottom: 10px; font-size: 94%;">Compute credits are expensive. Please help me keep this experiment running by buying me a coffee <a href="https://www.buymeacoffee.com/jrodolfoocG"> <u><b>here</u></b> :) </a></p></div><br>')
    gr.HTML('<div style="text-align: center; max-width: 650px; margin: 0 auto;"><p style="margin-bottom: 10px; font-size: 70%;">Built with GPT-3, Stable Diffusion, the Diffusers library and Gradio, by <a href="https://research.rodolfoocampo.com"><u><b>Rodolfo Ocampo</u></b></a></p></div>')
    # Wire the buttons to their handlers. Note continueStory reads and writes
    # the same textbox, so each click appends to the current (edited) story.
    b1.click(generateStory, inputs=[theme1, theme2], outputs=[story_output])
    b2.click(illustratedStory, inputs=[story_output], outputs=[illustration])
    b3.click(continueStory, inputs=[story_output], outputs=[story_output])
demo.launch(debug=True)