| import torch |
| from PIL import Image |
| from app_utils import * |
| import torch.nn.functional as F |
| import numpy as np |
| from torchvision import transforms as TF |
|
|
| from scipy.special import i0 |
| from scipy.optimize import curve_fit |
| from scipy.integrate import trapezoid |
| from functools import partial |
|
|
|
|
def von_mises_pdf_alpha_numpy(alpha, x, mu, kappa):
    """Von Mises-like density with angular frequency ``alpha``.

    Evaluates ``exp(kappa * cos(alpha * (x - mu))) / (2*pi)``.  Note the
    denominator is a fixed ``2*pi`` rather than the true von Mises
    normalization ``2*pi*I0(kappa)``; callers renormalize numerically, so
    only the curve's shape matters here.
    """
    normalization = 2 * np.pi
    pdf = np.exp(kappa * np.cos(alpha * (x - mu))) / normalization
    return pdf


def val_fit_alpha(distribute):
    """Select the best-supported angular frequency for each distribution.

    Each input is normalized to integrate to ~1 over [0, 2*pi), then fitted
    with von Mises-like densities of frequency alpha in {1, 2, 4}.  The
    alpha with the highest R^2 wins, but is rejected (set to 0.) when the
    fitted concentration ``kappa`` or the R^2 falls below a per-alpha
    threshold.

    Args:
        distribute: iterable of 1-D arrays of length 360, each a (possibly
            unnormalized) density sampled on ``np.linspace(0, 2*pi, 360)``.
            Inputs are NOT modified (the normalization is out-of-place).

    Returns:
        1-D ``torch.Tensor`` with one selected alpha (1., 2., 4., or 0.)
        per input distribution.
    """
    # Common abscissa for all fits; loop-invariant, so build it once.
    x = np.linspace(0, 2 * np.pi, 360)
    alphas = [1.0, 2.0, 4.0]
    # Per-alpha acceptance thresholds: alpha -> (min kappa, min r_squared).
    # Higher frequencies spread mass over more lobes, so they are allowed a
    # lower concentration.
    thresholds = {1.0: (0.6, 0.45), 2.0: (0.4, 0.45), 4.0: (0.25, 0.45)}

    fit_alphas = []
    for y_noise in distribute:
        # Out-of-place normalization: do not mutate the caller's array.
        y_noise = np.asarray(y_noise, dtype=float) / (trapezoid(y_noise, x) + 1e-8)

        # Start the fit at the empirical mode with unit concentration.
        initial_guess = [x[np.argmax(y_noise)], 1]

        saved_params = []
        saved_r_squared = []
        for alpha in alphas:
            try:
                pdf = partial(von_mises_pdf_alpha_numpy, alpha)
                params, _covariance = curve_fit(pdf, x, y_noise, p0=initial_guess)

                residuals = y_noise - pdf(x, *params)
                ss_res = np.sum(residuals**2)
                ss_tot = np.sum((y_noise - np.mean(y_noise))**2)
                r_squared = 1 - (ss_res / (ss_tot + 1e-8))

                saved_params.append(params)
                saved_r_squared.append(r_squared)
                if r_squared > 0.8:  # good enough; skip higher frequencies
                    break
            except (RuntimeError, ValueError, TypeError):
                # curve_fit failed (no convergence / bad input); record a
                # zero-score sentinel so this alpha cannot win the argmax.
                saved_params.append((0., 0.))
                saved_r_squared.append(0.)

        best = int(np.argmax(saved_r_squared))
        alpha = alphas[best]
        _mu_fit, kappa_fit = saved_params[best]
        r_squared = saved_r_squared[best]

        min_kappa, min_r2 = thresholds[alpha]
        if not (kappa_fit >= min_kappa and r_squared >= min_r2):
            alpha = 0.  # reject: fit too flat or too poor
        fit_alphas.append(alpha)
    return torch.tensor(fit_alphas)
|
|
def preprocess_images(image_list, mode="crop"):
    """Resize, crop/pad and batch a list of PIL images for a 518-px model.

    Args:
        image_list: non-empty list of PIL images (RGB or RGBA; RGBA is
            composited onto a white background first).
        mode: "crop" — resize width to 518 and center-crop the height down
            to 518 when taller; "pad" — resize the longer side to 518 and
            pad the rest with white (value 1.0) up to a 518x518 square.
            Heights/widths are rounded to multiples of 14 (patch size —
            TODO confirm against the downstream model).

    Returns:
        ``torch.Tensor`` of shape (N, 3, H, W) with values in [0, 1];
        H == W == 518 in "pad" mode, W == 518 in "crop" mode.  Images that
        still disagree in shape are padded up to the common maximum.

    Raises:
        ValueError: if ``image_list`` is empty or ``mode`` is invalid.
        RuntimeError: if resizing/conversion of an image fails.
    """
    if len(image_list) == 0:
        raise ValueError("At least 1 image is required")
    if mode not in ["crop", "pad"]:
        raise ValueError("Mode must be either 'crop' or 'pad'")

    images = []
    shapes = set()
    to_tensor = TF.ToTensor()
    target_size = 518

    for img in image_list:
        # Flatten transparency onto a white background before RGB conversion.
        if img.mode == "RGBA":
            background = Image.new("RGBA", img.size, (255, 255, 255, 255))
            img = Image.alpha_composite(background, img)
        img = img.convert("RGB")
        width, height = img.size

        if mode == "pad":
            # Longer side -> target_size; shorter side scaled to keep the
            # aspect ratio, rounded to a multiple of 14.
            if width >= height:
                new_width = target_size
                new_height = round(height * (new_width / width) / 14) * 14
            else:
                new_height = target_size
                new_width = round(width * (new_height / height) / 14) * 14
        else:
            # Width -> target_size; height rounded to a multiple of 14.
            new_width = target_size
            new_height = round(height * (new_width / width) / 14) * 14

        try:
            img = img.resize((new_width, new_height), Image.Resampling.BICUBIC)
            img = to_tensor(img)  # (3, H, W), float in [0, 1]
        except Exception as e:
            # Raise instead of print + `assert False`: asserts are stripped
            # under `python -O`, and the traceback keeps the root cause.
            raise RuntimeError(
                f"Failed to resize image from ({width}, {height}) "
                f"to ({new_width}, {new_height})"
            ) from e

        # Center-crop tall images down to the target height.
        if mode == "crop" and new_height > target_size:
            start_y = (new_height - target_size) // 2
            img = img[:, start_y : start_y + target_size, :]

        if mode == "pad":
            img = _pad_to(img, target_size, target_size)

        shapes.add((img.shape[1], img.shape[2]))
        images.append(img)

    # Mixed aspect ratios (possible in "crop" mode): pad everything up to
    # the largest observed shape so torch.stack succeeds.
    if len(shapes) > 1:
        print(f"Warning: Found images with different shapes: {shapes}")
        max_height = max(shape[0] for shape in shapes)
        max_width = max(shape[1] for shape in shapes)
        images = [_pad_to(img, max_height, max_width) for img in images]

    images = torch.stack(images)

    # Guarantee a batch dimension for the single-image case.
    if len(image_list) == 1 and images.dim() == 3:
        images = images.unsqueeze(0)

    return images


def _pad_to(img, height, width):
    """Symmetrically pad a (C, H, W) tensor with white (1.0) up to the
    requested height/width; returns the tensor unchanged when it is already
    at least that large."""
    h_padding = height - img.shape[1]
    w_padding = width - img.shape[2]
    if h_padding > 0 or w_padding > 0:
        pad_top = h_padding // 2
        pad_bottom = h_padding - pad_top
        pad_left = w_padding // 2
        pad_right = w_padding - pad_left
        # F.pad takes pads for the last dims first: (left, right, top, bottom).
        img = F.pad(
            img, (pad_left, pad_right, pad_top, pad_bottom), mode="constant", value=1.0
        )
    return img
|
|