|
|
import aiohttp |
|
|
from PIL import Image |
|
|
import io |
|
|
import torch |
|
|
import numpy as np |
|
|
import base64 |
|
|
import json |
|
|
import asyncio |
|
|
import ssl |
|
|
|
|
|
# Module-level SSL context (system default CAs); passed as the `ssl=`
# argument of the API POST inside HumanParseNode.send_to_api.
ssl_context = ssl.create_default_context()
|
|
|
|
|
class HumanParseNode:
    """ComfyUI node that sends an image to a remote human-parsing API.

    Returns three image tensors: the top-clothing mask, the bottom-clothing
    mask, and the combined segmentation map.
    """

    CATEGORY = "WEFA-DOOR"

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's inputs for ComfyUI: one required IMAGE."""
        return {
            "required": {
                "input_image": ("IMAGE", {}),
            },
        }

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE")
    RETURN_NAMES = ("top_mask", "bottom_mask", "combined_map")
    FUNCTION = "send_to_api_sync"

    @staticmethod
    async def image_to_base64(image_tensor):
        """Encode an image tensor as a base64 PNG string.

        Accepts (1, H, W, C), (C, H, W), (H, W, C) or (H, W) float tensors;
        values are assumed to lie in [0, 1] and are clipped for safety.
        """
        image_np = image_tensor.cpu().detach().numpy()

        # Drop a leading batch dimension of size 1 (ComfyUI IMAGE is NHWC).
        if image_np.ndim == 4 and image_np.shape[0] == 1:
            image_np = image_np.squeeze(0)

        # Convert channels-first (C, H, W) layouts to channels-last.
        if image_np.ndim == 3 and image_np.shape[0] in {1, 3}:
            image_np = image_np.transpose(1, 2, 0)

        # Clip before casting: without the clip, out-of-range floats wrap
        # around when converted to uint8 and corrupt the encoded image.
        image_np = (np.clip(image_np, 0.0, 1.0) * 255).astype(np.uint8)

        # PIL expects a 2-D array for single-channel images.
        if image_np.ndim == 3 and image_np.shape[2] == 1:
            image_np = image_np.squeeze(2)

        image_pil = Image.fromarray(image_np)
        buffer = io.BytesIO()
        image_pil.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode()

    async def send_to_api(self, input_image):
        """POST the image to the parsing service and decode the results.

        Returns:
            (top_mask, bottom_mask, combined_map) float tensors, each with
            a leading batch dimension and values in [0, 1].

        Raises:
            RuntimeError: on a non-200 response or a response missing any
                of the expected images.
        """
        url = 'http://63.141.33.9:22161'
        # SECURITY: hard-coded API key in source — move this to an
        # environment variable or config file before sharing this node.
        headers = {
            'X-API-KEY': 'xiCQTaoQKXUNATzuFLWRgtoJKiFXiDGvnk',
            'Content-Type': 'application/json'
        }

        json_payload = json.dumps({
            "image": await HumanParseNode.image_to_base64(input_image)
        })

        # Bound the request so an unreachable server cannot hang the
        # workflow indefinitely.
        timeout = aiohttp.ClientTimeout(total=300)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(url, data=json_payload, headers=headers, ssl=ssl_context) as response:
                print(f"Response status: {response.status}")
                if response.status != 200:
                    response_text = await response.text()
                    # Raise instead of returning None: a None return would
                    # violate RETURN_TYPES and crash downstream nodes with
                    # an opaque error.
                    raise RuntimeError(
                        f"Human-parse API returned status {response.status}: {response_text}"
                    )

                response_json = await response.json()

                # Decode each expected image, failing loudly if the server
                # omitted one (base64-decoding None would raise a confusing
                # TypeError instead).
                decoded = {}
                for key in ("1.png", "2.png", "final_seg.png"):
                    encoded = response_json.get(key)
                    if not encoded:
                        raise RuntimeError(f"Human-parse API response missing '{key}'")
                    decoded[key] = Image.open(io.BytesIO(base64.b64decode(encoded)))

                top_mask_image = decoded["1.png"].convert('L')
                bottom_mask_image = decoded["2.png"].convert('L')
                combined_map_image = decoded["final_seg.png"].convert('RGB')

                # Normalize to float tensors in [0, 1] with a batch dim.
                top_mask_tensor = torch.tensor(np.array(top_mask_image)).unsqueeze(0).float() / 255.0
                bottom_mask_tensor = torch.tensor(np.array(bottom_mask_image)).unsqueeze(0).float() / 255.0
                combined_map_tensor = torch.tensor(np.array(combined_map_image)).unsqueeze(0).float() / 255.0

                return (top_mask_tensor, bottom_mask_tensor, combined_map_tensor)

    def send_to_api_sync(self, input_image):
        """Synchronous entry point invoked by ComfyUI (see FUNCTION).

        NOTE(review): asyncio.run() raises if an event loop is already
        running in this thread; that is fine for standard ComfyUI
        execution, which calls node functions synchronously.
        """
        return asyncio.run(self.send_to_api(input_image))
|
|
|