Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,20 +1,19 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import os
|
| 3 |
-
from PIL import Image, ImageDraw
|
| 4 |
import re
|
|
|
|
| 5 |
import requests
|
| 6 |
from io import BytesIO
|
|
|
|
|
|
|
| 7 |
|
| 8 |
-
|
| 9 |
-
from diffusers import StableDiffusionPipeline
|
| 10 |
-
import torch
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
|
|
|
| 14 |
|
| 15 |
-
|
| 16 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
|
| 17 |
-
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
|
| 18 |
|
| 19 |
def screenwriter(prompt: str) -> str:
|
| 20 |
instructions = f"""
|
|
@@ -33,10 +32,15 @@ def screenwriter(prompt: str) -> str:
|
|
| 33 |
|
| 34 |
STORY PROMPT: {prompt}
|
| 35 |
"""
|
| 36 |
-
result = story_gen(instructions, max_new_tokens=250)[0]["generated_text"]
|
| 37 |
-
return result
|
| 38 |
|
| 39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
return re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL).strip()
|
| 41 |
|
| 42 |
def parse_screenwriter_output(output: str):
|
|
@@ -64,11 +68,17 @@ def illustrator(story: str, character: str):
|
|
| 64 |
raise ValueError('Could not parse story or character from input.')
|
| 65 |
|
| 66 |
scenes = [s.strip() for s in story.split('.') if s.strip()]
|
|
|
|
| 67 |
images = []
|
| 68 |
for idx, scene in enumerate(scenes):
|
| 69 |
-
prompt = f
|
| 70 |
try:
|
| 71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
images.append((image, scene))
|
| 73 |
except Exception as e:
|
| 74 |
images.append((error_image(f'Error: {str(e)}'), f'Error in scene {idx + 1}'))
|
|
@@ -82,15 +92,20 @@ def pipeline(prompt: str):
|
|
| 82 |
images = illustrator(story, character)
|
| 83 |
return f"{story}\n---\n{character}", images
|
| 84 |
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
gr.Markdown(
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
generated_story = gr.Button('Generate Story')
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
generated_story.click(pipeline, inputs=story_input, outputs=[story_output, gallery])
|
| 94 |
|
| 95 |
if __name__ == "__main__":
|
| 96 |
-
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import os
|
|
|
|
| 3 |
import re
|
| 4 |
+
from PIL import Image, ImageDraw
|
| 5 |
import requests
|
| 6 |
from io import BytesIO
|
| 7 |
+
from huggingface_hub import InferenceClient
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
|
| 10 |
+
# Read variables from a local .env file into the process environment, if present.
load_dotenv()

# Every Hugging Face Inference API call below needs an access token; fail fast
# (an empty string counts as missing) instead of erroring on the first request.
hf_token = os.environ.get("HF_API_TOKEN")
if not hf_token:
    raise ValueError("Set your HF_API_TOKEN environment variable before running.")

# Single shared client, reused by the text- and image-generation code below.
client = InferenceClient(token=hf_token)
|
|
|
|
|
|
|
| 17 |
|
| 18 |
def screenwriter(prompt: str) -> str:
|
| 19 |
instructions = f"""
|
|
|
|
| 32 |
|
| 33 |
STORY PROMPT: {prompt}
|
| 34 |
"""
|
|
|
|
|
|
|
| 35 |
|
| 36 |
+
response = client.text_generation(
|
| 37 |
+
model="tiiuae/falcon-7b-instruct",
|
| 38 |
+
inputs=instructions,
|
| 39 |
+
max_new_tokens=250
|
| 40 |
+
)
|
| 41 |
+
return response[0]['generated_text']
|
| 42 |
+
|
| 43 |
+
def remove_think_block(text: str):
    """Return *text* with every ``<think>...</think>`` span removed and edges trimmed.

    ``re.DOTALL`` lets a reasoning block span multiple lines; the non-greedy
    quantifier keeps two separate blocks from being merged into one match.
    """
    without_thoughts = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
    return without_thoughts.strip()
|
| 45 |
|
| 46 |
def parse_screenwriter_output(output: str):
|
|
|
|
| 68 |
raise ValueError('Could not parse story or character from input.')
|
| 69 |
|
| 70 |
scenes = [s.strip() for s in story.split('.') if s.strip()]
|
| 71 |
+
|
| 72 |
images = []
|
| 73 |
for idx, scene in enumerate(scenes):
|
| 74 |
+
prompt = f'Comic book illustration of the scene. No text. Scene: {scene}. Character: {character}'
|
| 75 |
try:
|
| 76 |
+
response = client.text_to_image(
|
| 77 |
+
model="stabilityai/stable-diffusion-2",
|
| 78 |
+
inputs=prompt
|
| 79 |
+
)
|
| 80 |
+
image_url = response['generated_image_url']
|
| 81 |
+
image = Image.open(BytesIO(requests.get(image_url).content))
|
| 82 |
images.append((image, scene))
|
| 83 |
except Exception as e:
|
| 84 |
images.append((error_image(f'Error: {str(e)}'), f'Error in scene {idx + 1}'))
|
|
|
|
| 92 |
images = illustrator(story, character)
|
| 93 |
return f"{story}\n---\n{character}", images
|
| 94 |
|
| 95 |
+
# --- Gradio front end -------------------------------------------------------
# One prompt textbox drives `pipeline`; its two outputs fill the screenwriter
# textbox and the comic-scene gallery.
with gr.Blocks(theme=gr.themes.Ocean(),
               title='Comic Generator') as demo:
    gr.Markdown(
        '''
        # Comic Generator
        Generates a comic off of your prompt.
        ''')
    with gr.Row():
        story_input = gr.Textbox(label='Story Prompt', placeholder='A unicorn named Jeff discovers a mysterious dish')
    # NOTE(review): the scraped source loses indentation, so it is ambiguous
    # whether this button sits inside the row above — confirm the live layout.
    generated_story = gr.Button('Generate Story')
    with gr.Row():
        story_output = gr.Textbox(label='Screenwriter', lines=5)
        gallery = gr.Gallery(label='Comic Scenes')
    generated_story.click(pipeline, inputs=story_input, outputs=[story_output, gallery])

if __name__ == "__main__":
    # NOTE(review): mcp_server=True presumably also exposes the app's functions
    # over the Model Context Protocol — verify against the Gradio docs.
    demo.launch(mcp_server=True)
|