umang-immersfy commited on
Commit
5c88e58
·
1 Parent(s): ab8d95b

modified the code, WIP

Browse files
__pycache__/core.cpython-310.pyc CHANGED
Binary files a/__pycache__/core.cpython-310.pyc and b/__pycache__/core.cpython-310.pyc differ
 
__pycache__/openai_wrapper.cpython-310.pyc CHANGED
Binary files a/__pycache__/openai_wrapper.cpython-310.pyc and b/__pycache__/openai_wrapper.cpython-310.pyc differ
 
__pycache__/parameters.cpython-310.pyc CHANGED
Binary files a/__pycache__/parameters.cpython-310.pyc and b/__pycache__/parameters.cpython-310.pyc differ
 
app.py CHANGED
@@ -234,12 +234,7 @@ with gr.Blocks() as demo:
234
 
235
  regenerate_comps_btn.click(
236
  core.regenerate_composition_data,
237
- inputs=[image_description,
238
- narration,
239
- character,
240
- dialouge,
241
- location,
242
- setting,
243
  current_episode,
244
  current_scene,
245
  current_frame,
 
234
 
235
  regenerate_comps_btn.click(
236
  core.regenerate_composition_data,
237
+ inputs=[
 
 
 
 
 
238
  current_episode,
239
  current_scene,
240
  current_frame,
core.py CHANGED
@@ -215,48 +215,54 @@ def toggle_developer_options(is_developer: bool):
215
 
216
 
217
def regenerate_composition_data(
    description: str,
    narration: str,
    character: str,
    dialouge: str,
    location: str,
    frame_setting: str,
    current_episode: int,
    current_scene: int,
    current_frame: int,
    episodes_data: dict,
):
    """Regenerate the image compositions for one comic frame.

    Not implemented yet: the LLM-backed generation was sketched as
    commented-out code and has been removed for clarity; this stub
    deliberately does nothing and returns None.

    NOTE(review): the parameter name ``dialouge`` is a typo for
    ``dialogue`` but is kept as-is because callers (the Gradio
    ``inputs=[...]`` wiring) bind it by position.
    """
    # Intentional no-op while the feature is work-in-progress.
    pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
252
 
253
 
254
def regenerate_data(
    frame_data: ComicFrame,
):
    """Regenerate the rendered assets for a single comic frame.

    Placeholder — the image-regeneration logic is not written yet, so
    calling this is a deliberate no-op that returns None.
    """
    pass
258
-
259
-
260
  # # for
261
  # payload = {
262
  # "prompt": composition.prompt,
 
215
 
216
 
217
def regenerate_composition_data(
    current_episode: int,
    current_scene: int,
    current_frame: int,
    episodes_data: dict,
):
    """Regenerate the image compositions for one frame via the LLM.

    Looks the frame up in ``episodes_data``, renders the composition
    prompt template with the frame as context, asks the model for a
    JSON payload, and writes the parsed ``Composition`` objects back
    onto the frame in place.

    NOTE(review): ``current_scene`` only appears in the log message —
    the lookup is ``episodes_data[current_episode][current_frame]``,
    which skips the scene level. Confirm the data layout really is
    episode -> frame rather than episode -> scene -> frame.
    NOTE(review): nothing is returned, so as a Gradio event handler
    this relies on ``episodes_data`` being mutated in place — verify
    the UI state actually refreshes from this object.
    """
    print(
        f"Generating compositions for episode: {current_episode} and scene: {current_scene} and frame: {current_frame}."
    )

    # Fetch the frame object that will receive the new compositions.
    target_frame = episodes_data[current_episode][current_frame]
    print(target_frame)

    # Assemble the system/user prompt pair; the user prompt is a Jinja
    # template rendered with the frame as its only context variable.
    llm_prompt = {
        "system": script_gen.generate_image_compositions_instruction,
        "user": jinja2.Template(
            source=script_gen.generate_image_compositions_user_prompt
        ).render({"FRAME": target_frame}),
    }

    # Ask the model for a JSON response and echo it for debugging.
    response = llm.generate_valid_json_response(llm_prompt)
    print(response)

    # Wrap each raw dict in a Composition; seed/image start out blank
    # until an image has actually been generated for the composition.
    regenerated = []
    for raw_composition in response["compositions"]:
        regenerated.append(Composition(**raw_composition, seed="", image=""))
    target_frame.compositions = regenerated

    # Write the mutated frame back into the shared state structure.
    episodes_data[current_episode][current_frame] = target_frame
    print(f"Updated frame {current_frame} in episode {current_episode} with new compositions.")
260
 
261
 
262
def regenerate_data(
    frame_data: ComicFrame,
):
    """Regenerate the rendered assets for one ``ComicFrame``.

    Stub kept while the feature is work-in-progress: it performs no
    work and returns None.
    """
    pass
 
 
266
  # # for
267
  # payload = {
268
  # "prompt": composition.prompt,
openai_wrapper.py CHANGED
@@ -70,7 +70,7 @@ class OpenAIModel(abc.ABC):
70
  temperature: int = 0.6,
71
  ) -> str:
72
  """Generate a response with retries, returning a valid JSON."""
73
- for _ in range(parameters.MAX_TRIES):
74
  try:
75
  model_response = self.generate_response(
76
  prompt_dict, max_output_tokens, temperature, {"type": "json_object"}
 
70
  temperature: int = 0.6,
71
  ) -> str:
72
  """Generate a response with retries, returning a valid JSON."""
73
+ for _ in range(int(parameters.MAX_TRIES)):
74
  try:
75
  model_response = self.generate_response(
76
  prompt_dict, max_output_tokens, temperature, {"type": "json_object"}