KingNish committed on
Commit
bf3e4f5
·
1 Parent(s): 7e21d29

modified: app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -13
app.py CHANGED
@@ -8,36 +8,62 @@ def set_client_for_session(request: gr.Request):
8
  ltx_client = Client("KingNish/ltx-video-distilled", headers={"x-ip-token": x_ip_token})
9
  return flux_client, ltx_client
10
 
11
def text_to_image(prompt, image, client):
    """Send *image* and *prompt* to the Flux Kontext Space and return its raw prediction."""
    prediction = client.predict(
        input_image=handle_file(image),
        prompt=prompt,
        seed=0,
        randomize_seed=True,  # seed=0 is ignored when the Space randomizes
        guidance_scale=2.5,
        steps=30,
        api_name="/infer",
    )
    print(prediction)  # debug: log the Space's raw return value
    return prediction
23
 
 
 
 
 
 
 
 
 
24
with gr.Blocks() as demo:
    # Per-session Space clients; populated by demo.load below.
    flux_client = gr.State(None)
    ltx_client = gr.State(None)
    image = gr.Image()
    input_image = gr.Image(type="filepath")
    prompt = gr.Textbox(max_lines=1)
    # Submitting the prompt runs text_to_image with this session's Flux client.
    prompt.submit(text_to_image, [prompt, input_image, flux_client], [image])

    # gr.Examples(
    # examples=[
    # ["A sunny day", "https://black-forest-labs-flux-1-kontext-dev.hf.space/gradio_api/file=/tmp/gradio/927b05b08b8ec2201f7d95722471d2089d4197f152c9c07d0bc9f9672962d03b/flowers.png"],
    # ],
    # inputs=[prompt, input_image],
    # outputs=image,
    # )

    # Create the per-session clients (set_client_for_session forwards the
    # request's x-ip-token header to both Spaces).
    demo.load(set_client_for_session, None, [flux_client, ltx_client])

demo.launch()
43
 
 
8
  ltx_client = Client("KingNish/ltx-video-distilled", headers={"x-ip-token": x_ip_token})
9
  return flux_client, ltx_client
10
 
11
def image_to_image(prompt, image, flux_client):
    """Edit *image* according to *prompt* via the Flux Kontext Space.

    Returns the first element of the Space's /infer result (the edited image).
    """
    # Prepend the fixed editing instruction to the user's prompt.
    full_prompt = (
        "You are an AI image-to-image editor that transforms a user-provided input image based on their prompt while maintaining consistency and fidelity to the original image. Generate an output image that accurately reflects the user's requested modifications, preserving key elements like style, composition, and context from the input image unless explicitly instructed otherwise. "
        + prompt
    )
    response = flux_client.predict(
        input_image=handle_file(image),
        prompt=full_prompt,
        seed=0,
        randomize_seed=True,  # seed=0 is ignored when the Space randomizes
        guidance_scale=2.5,
        steps=20,
        api_name="/infer",
    )
    print(response)  # debug: log the Space's raw return value
    return response[0]
24
+
25
def image_to_video(prompt, image, ltx_client):
    """Animate *image* into a short clip with the LTX distilled Space."""
    video_result = ltx_client.predict(
        prompt=prompt,
        input_image_url=None,
        middle_image_url=handle_file(image),  # anchor the clip on the supplied frame
        final_image_url=None,
        duration_ui=4,  # value of the Space's duration slider (presumably seconds — confirm)
        api_name="/generate_video",
    )
    print(video_result)  # debug: log the Space's raw return value
    return video_result
36
 
37
def personalized_video(prompt, image, flux_client, ltx_client, request: gr.Request):
    """Edit *image* per *prompt*, then animate the result into a video.

    Generator: first yields (edited_image, None) so the UI can show the
    intermediate image, then (edited_image, video) once the clip is ready.
    """
    # FIX: the original did request.headers['x-ip-token'] (KeyError if the
    # header is absent, e.g. for cached examples) and print()ed the token,
    # leaking a per-user credential into the logs. Fetch defensively and
    # never log its value. The token is currently unused; retained in case
    # a follow-up passes it to the downstream Spaces.
    x_ip_token = request.headers.get('x-ip-token') if request is not None else None
    edited = image_to_image(prompt, image, flux_client)
    yield edited, None
    video = image_to_video(prompt, edited, ltx_client)
    yield edited, video
45
with gr.Blocks() as demo:
    # Per-session Space clients; intended to be filled by set_client_for_session.
    flux_client = gr.State(None)
    ltx_client = gr.State(None)

    input_image = gr.Image(type="filepath")
    prompt = gr.Textbox(max_lines=1)
    # FIX: personalized_video yields (image, video) pairs, so two output
    # components are required; the original wired only [output_video],
    # mismatching the generator's yield arity.
    output_image = gr.Image()
    output_video = gr.Video()
    submit_button = gr.Button("Submit")
    submit_button.click(personalized_video, [prompt, input_image, flux_client, ltx_client], [output_image, output_video])
    prompt.submit(personalized_video, [prompt, input_image, flux_client, ltx_client], [output_image, output_video])

    gr.Examples(
        examples=[
            # Trailing None, None fill the two State inputs (no session clients at cache time).
            ["A sunny day", "https://black-forest-labs-flux-1-kontext-dev.hf.space/gradio_api/file=/tmp/gradio/927b05b08b8ec2201f7d95722471d2089d4197f152c9c07d0bc9f9672962d03b/flowers.png", None, None],
        ],
        inputs=[prompt, input_image, flux_client, ltx_client],
        # FIX: the original had `outputs=image`, an undefined name after this
        # commit removed the old `image = gr.Image()` component (NameError at startup).
        outputs=[output_image, output_video],
        fn=personalized_video,
        cache_examples=True,
        cache_mode="eager",
    )

    # NOTE(review): with this load disabled, flux_client/ltx_client States stay
    # None and the .predict calls in the handlers will fail for live requests —
    # confirm disabling it is intentional (e.g. only eager-cached examples are served).
    # demo.load(set_client_for_session, None, [flux_client, ltx_client])

demo.launch()