"""Local smoke test for the custom inference endpoint handler.

Encodes a reference image as base64, builds a sample request payload,
runs one inference call through ``EndpointHandler``, and saves the
returned base64-encoded video to ``output_video.mp4``.
"""

import base64
import io
import json

from PIL import Image

from handler import EndpointHandler

# Initialize the handler once; model loading happens inside its constructor.
handler = EndpointHandler()

# Read the sample reference image and encode it as base64 so it can be
# embedded in a JSON-style request payload.
with open("rithwik.png", "rb") as image_file:
    ref_image_base64 = base64.b64encode(image_file.read()).decode("utf-8")

# Sample request payload mirroring the endpoint's expected schema.
# NOTE(review): key names and value ranges are assumed to match what
# EndpointHandler.__call__ expects — confirm against handler.py.
inputs = {
    "inputs": {
        "ref_image": ref_image_base64,
        "pose_video_path": "pose_video.mp4",
        "width": 378,
        "height": 504,
        "length": 24,
        "num_inference_steps": 25,
        "cfg": 3.5,
        "seed": 123,
    }
}

# Simulate an inference call.
output = handler(inputs)

# Decode the base64 video from the response and write it to disk.  The
# previous version left this commented out, so the success message below
# was printed even though nothing was ever saved.
video_base64 = output.get("video", "")
if video_base64:
    with open("output_video.mp4", "wb") as video_file:
        video_file.write(base64.b64decode(video_base64))
    print("Inference completed. Output video saved as output_video.mp4")
else:
    print("Inference completed, but the handler returned no video data.")