# NOTE(review): the three lines below are web-page scrape residue (a Hugging Face
# file-listing header: uploader, LFS flag, commit hash) — not Python source.
# Preserved as comments so the module parses:
# 3v324v23's picture
# lfs
# 1e3b872
import aiohttp
from PIL import Image
import io
import torch
import numpy as np
import base64
import json
import asyncio
import ssl
# Default SSL context with system CA certificates, passed to aiohttp below.
# (Only consulted for https:// URLs; harmless for the plain-http endpoint used here.)
ssl_context = ssl.create_default_context()
class HumanParseNode:
    """ComfyUI node that sends an image to a remote human-parsing service.

    Returns three tensors: a top-garment mask, a bottom-garment mask, and a
    combined segmentation map, all decoded from base64 PNGs in the service's
    JSON response.
    """

    CATEGORY = "WEFA-DOOR"

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's required inputs for ComfyUI."""
        return {
            "required": {
                "input_image": ("IMAGE", {}),
            },
        }

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE")
    RETURN_NAMES = ("top_mask", "bottom_mask", "combined_map")
    FUNCTION = "send_to_api_sync"

    @staticmethod
    async def image_to_base64(image_tensor):
        """Encode a torch image tensor as a base64 PNG string.

        Accepts (1, H, W, C), (H, W, C), (C, H, W) or (H, W, 1) layouts;
        pixel values are assumed to be floats in [0, 1] (ComfyUI convention
        — TODO confirm against callers).
        """
        image_np = image_tensor.cpu().detach().numpy()
        # Drop a singleton batch dimension: (1, H, W, C) -> (H, W, C).
        if image_np.ndim == 4 and image_np.shape[0] == 1:
            image_np = image_np.squeeze(0)
        # CHW -> HWC when the leading axis looks like channels.
        if image_np.ndim == 3 and image_np.shape[0] in {1, 3}:
            image_np = image_np.transpose(1, 2, 0)
        # Clip before scaling so out-of-range floats don't wrap around in uint8.
        image_np = (np.clip(image_np, 0.0, 1.0) * 255).astype(np.uint8)
        # Grayscale (H, W, 1) -> (H, W) so PIL treats it as mode "L".
        if image_np.ndim == 3 and image_np.shape[2] == 1:
            image_np = image_np.squeeze(2)
        image_pil = Image.fromarray(image_np)
        buffer = io.BytesIO()
        image_pil.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode()

    @staticmethod
    def _base64_png_to_tensor(data_b64, mode):
        """Decode a base64 PNG into a (1, H, W[, C]) float tensor in [0, 1]."""
        image = Image.open(io.BytesIO(base64.b64decode(data_b64))).convert(mode)
        return torch.tensor(np.array(image)).unsqueeze(0).float() / 255.0

    async def send_to_api(self, input_image):
        """POST the image to the parsing service and decode the three results.

        Returns a (top_mask, bottom_mask, combined_map) tensor tuple, or None
        when the service replies with a non-200 status or an incomplete payload.
        """
        url = 'http://63.141.33.9:22161'
        # SECURITY NOTE(review): hard-coded API key sent over plain HTTP —
        # this should live in configuration/environment and use HTTPS.
        headers = {
            'X-API-KEY': 'xiCQTaoQKXUNATzuFLWRgtoJKiFXiDGvnk',
            'Content-Type': 'application/json'
        }
        json_payload = json.dumps({
            "image": await HumanParseNode.image_to_base64(input_image)
        })
        # Bounded timeout so a hung service cannot block the node forever.
        timeout = aiohttp.ClientTimeout(total=120)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            # ssl_context is only consulted for https URLs; harmless here.
            async with session.post(url, data=json_payload, headers=headers, ssl=ssl_context) as response:
                print(f"Response status: {response.status}")
                if response.status != 200:
                    response_text = await response.text()
                    print(f"Error: Response status {response.status}. Response text: {response_text}")
                    return None
                response_json = await response.json()
                top_mask_base64 = response_json.get("1.png")
                bottom_mask_base64 = response_json.get("2.png")
                combined_map_base64 = response_json.get("final_seg.png")
                # Guard against a partial payload: b64decode(None) would
                # otherwise raise a cryptic TypeError. Mirror the existing
                # return-None error convention instead.
                if not (top_mask_base64 and bottom_mask_base64 and combined_map_base64):
                    print(f"Error: response missing expected image keys; got {sorted(response_json)}")
                    return None
                top_mask_tensor = self._base64_png_to_tensor(top_mask_base64, 'L')
                bottom_mask_tensor = self._base64_png_to_tensor(bottom_mask_base64, 'L')
                combined_map_tensor = self._base64_png_to_tensor(combined_map_base64, 'RGB')
                return (top_mask_tensor, bottom_mask_tensor, combined_map_tensor)

    def send_to_api_sync(self, input_image):
        """Synchronous entry point named by FUNCTION; blocks until the
        request coroutine completes."""
        # NOTE: asyncio.run raises if an event loop is already running in
        # this thread — fine for ComfyUI's worker-thread execution model.
        return asyncio.run(self.send_to_api(input_image))