K00B404 commited on
Commit
854f23e
·
verified ·
1 Parent(s): a393fe3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -4
app.py CHANGED
@@ -1,13 +1,20 @@
1
  import gradio as gr
2
  import requests
3
  from PIL import Image
 
4
  from transformers import BlipProcessor, BlipForConditionalGeneration
5
  import time
6
  from gradio_client import Client
7
 
 
8
  blipper="Salesforce/blip-image-captioning-large"
9
  chatter="K00B404/transcript_image_generator"
10
 
 
 
 
 
 
11
  # Load BLIP model for image captioning
12
  processor = BlipProcessor.from_pretrained(blipper)
13
  model = BlipForConditionalGeneration.from_pretrained(blipper)
@@ -32,9 +39,9 @@ Some characters might be terse while others might be more verbose."""
32
  return persona
33
 
34
 
35
- def dramaturg(message, max_tokens=256, temperature=0.5, top_p=0.95):
36
  """Function to interact with the chatbot API using the generated persona"""
37
- system_message="You are a Expert Dramaturg and your task is to use the input persona information and write a 'Role' description as compact instuctions for the actor"
38
  try:
39
  # Call the API with the current message and system prompt (persona)
40
  response = chatbot_client.predict(
@@ -78,8 +85,11 @@ def generate_persona(img, min_len, max_len, persona_detail_level):
78
  # Calculate processing time
79
  end = time.time()
80
  total_time = f"Processing time: {end - start:.2f} seconds"
81
-
82
- return caption, dramaturg(persona), total_time
 
 
 
83
 
84
 
85
  def chat_with_persona(message, history, system_message, max_tokens, temperature, top_p):
@@ -98,6 +108,48 @@ def chat_with_persona(message, history, system_message, max_tokens, temperature,
98
  except Exception as e:
99
  return f"Error communicating with the chatbot API: {str(e)}"
100
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
  # Create Gradio interface with tabs
102
  with gr.Blocks(title="Image Character Persona Generator") as iface:
103
  # Store the generated persona in a state variable to share between tabs
@@ -129,6 +181,7 @@ with gr.Blocks(title="Image Character Persona Generator") as iface:
129
  3. Select detail level for the persona
130
  4. Click "Generate Character Persona"
131
  5. Switch to the "Test Persona" tab to chat with your character
 
132
  """)
133
 
134
  # Second tab: Test Character Chat
@@ -176,6 +229,31 @@ with gr.Blocks(title="Image Character Persona Generator") as iface:
176
  [msg, chatbot])
177
 
178
  clear_btn.click(clear_chat, outputs=chatbot)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
  # Function to update system prompt in Test tab when persona is generated
181
  def update_persona_state(caption, persona, time_output):
 
1
  import gradio as gr
2
  import requests
3
  from PIL import Image
4
+ import os
5
  from transformers import BlipProcessor, BlipForConditionalGeneration
6
  import time
7
  from gradio_client import Client
8
 
9
+ token = os.getenv('HF_TOKEN')
10
  blipper="Salesforce/blip-image-captioning-large"
11
  chatter="K00B404/transcript_image_generator"
12
 
13
+ # Set your API endpoint and authorization details
14
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
15
+ headers = {"Authorization": f"Bearer {token}"} # Replace with your actual token
16
+ timeout = 60 # seconds
17
+
18
  # Load BLIP model for image captioning
19
  processor = BlipProcessor.from_pretrained(blipper)
20
  model = BlipForConditionalGeneration.from_pretrained(blipper)
 
39
  return persona
40
 
41
 
42
+ def helper_llm(message, system_prompt, max_tokens=256, temperature=0.5, top_p=0.95):
43
  """Function to interact with the chatbot API using the generated persona"""
44
+
45
  try:
46
  # Call the API with the current message and system prompt (persona)
47
  response = chatbot_client.predict(
 
85
  # Calculate processing time
86
  end = time.time()
87
  total_time = f"Processing time: {end - start:.2f} seconds"
88
+
89
+ # dramaturg to make a solid role for an actor from the pragmatic description
90
+ system_message="You are an Expert Dramaturg and your task is to use the input persona information and write a 'Role' description as compact instructions for the actor"
91
+ persona = helper_llm(persona, system_prompt=system_message)
92
+ return caption, persona, total_time
93
 
94
 
95
  def chat_with_persona(message, history, system_message, max_tokens, temperature, top_p):
 
108
  except Exception as e:
109
  return f"Error communicating with the chatbot API: {str(e)}"
110
 
111
+
112
+
113
+
114
+
115
def generate_flux_image(final_prompt, is_negative, steps, cfg_scale, seed, strength):
    """
    Generate an image with the FLUX model via Hugging Face's inference API.

    Sends a POST request to ``API_URL`` with the generation parameters and,
    on success, saves the returned image bytes to ``./output_<seed>.png`` so
    the Gradio ``Image`` component can display it from disk.

    Args:
        final_prompt: Text prompt sent to the model.
        is_negative: Whether the prompt is treated as a negative prompt.
        steps: Number of diffusion steps.
        cfg_scale: Classifier-free guidance scale.
        seed: RNG seed; also used to build the output filename.
        strength: Denoising strength.

    Returns:
        Tuple ``(output_path, seed_str, final_prompt)`` on success, or
        ``(None, None, None)`` if the response bytes could not be decoded
        as an image.

    Raises:
        gr.Error: If the API responds with a non-200 status
            (503 means the model is still being loaded).
    """
    # Local import: `io` is used below but never imported at module level
    # in this file — without this, Image.open(io.BytesIO(...)) raises NameError.
    import io

    payload = {
        "inputs": final_prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
    }

    # `API_URL`, `headers`, and `timeout` are module-level config set near the imports.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)

    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image = Image.open(io.BytesIO(response.content))
        # Save to a seed-based filename; the path (not the PIL object) is returned.
        output_path = f"./output_{seed}.png"
        image.save(output_path)
        print(f'\033[1mGeneration completed!\033[0m (Prompt: {final_prompt})')
        return output_path, str(seed), final_prompt
    except Exception as e:
        # Best-effort: a non-image payload (e.g. JSON error body) lands here.
        print(f"Error when trying to open the image: {e}")
        return None, None, None
150
+
151
+
152
+
153
  # Create Gradio interface with tabs
154
  with gr.Blocks(title="Image Character Persona Generator") as iface:
155
  # Store the generated persona in a state variable to share between tabs
 
181
  3. Select detail level for the persona
182
  4. Click "Generate Character Persona"
183
  5. Switch to the "Test Persona" tab to chat with your character
184
+ 6. create similar images inspired by the 'role'
185
  """)
186
 
187
  # Second tab: Test Character Chat
 
229
  [msg, chatbot])
230
 
231
  clear_btn.click(clear_chat, outputs=chatbot)
232
+
233
+
234
+ # New Tab 3: Flux Image Generation
235
+ with gr.Tab("Flux Image Generation"):
236
+ gr.Markdown("### Flux Image Generation")
237
+ final_prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Enter your prompt for Flux...")
238
+ is_negative = gr.Checkbox(label="Use Negative Prompt", value=False)
239
+ steps = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Steps")
240
+ cfg_scale = gr.Slider(minimum=1, maximum=20, step=1, value=7, label="CFG Scale")
241
+ seed = gr.Number(value=42, label="Seed")
242
+ strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.8, label="Strength")
243
+
244
+ generate_button = gr.Button("Generate Flux Image")
245
+ output_image = gr.Image(label="Generated Image")
246
+ output_seed = gr.Textbox(label="Seed Used")
247
+ output_prompt = gr.Textbox(label="Prompt Used")
248
+
249
+ generate_button.click(
250
+ fn=generate_flux_image,
251
+ inputs=[final_prompt, is_negative, steps, cfg_scale, seed, strength],
252
+ outputs=[output_image, output_seed, output_prompt]
253
+ )
254
+
255
+
256
+
257
 
258
  # Function to update system prompt in Test tab when persona is generated
259
  def update_persona_state(caption, persona, time_output):