AlekseyCalvin committed on
Commit
e28a784
·
verified ·
1 Parent(s): d940e49

Update utils.py

Browse files
Files changed (1) hide show
  1. utils.py +83 -46
utils.py CHANGED
@@ -6,72 +6,109 @@ import torch
6
  from PIL import Image
7
 
8
def parse_weight_string(string, max_frames):
    # Pre-commit version (replaced in this diff by the version further below).
    # Parses a Deforum-style keyframe schedule, e.g. "0:(1.0), 30:(sin(t/10))",
    # into a per-frame numpy array of length max_frames.

    # Strip ALL whitespace so "0: (1), 30: (2)" and "0:(1),30:(2)" parse alike.
    string = re.sub(r'\s+', '', str(string))
    keyframes = {}
    parts = string.split(',')
    for part in parts:
        try:
            # Entries without "frame:value" shape are silently skipped.
            if ':' not in part: continue
            f_str, v_str = part.split(':', 1)
            # Values are conventionally wrapped in parentheses: "30:(0.5)".
            keyframes[int(f_str)] = v_str.strip('()')
        except: continue  # NOTE(review): bare except — malformed entries are dropped silently
    # Guarantee a value at frame 0 so the first segment always has a formula.
    if 0 not in keyframes: keyframes[0] = "0"

    series = np.zeros(int(max_frames))
    sorted_k = sorted(keyframes.keys())
    # Each keyframe's formula holds from its frame up to (not including) the next keyframe.
    for i in range(len(sorted_k)):
        f_start = sorted_k[i]
        f_end = sorted_k[i+1] if i < len(sorted_k)-1 else int(max_frames)
        # NOTE(review): a keyframe index >= max_frames makes f_end overrun the
        # series and raise IndexError below — fixed in the updated version.
        formula = keyframes[f_start]
        for f in range(f_start, f_end):
            try:
                # 't' is the current frame number inside the formula.
                series[f] = float(numexpr.evaluate(formula, local_dict={'t':f,'pi':np.pi,'sin':np.sin,'cos':np.cos}))
            except:
                # Fallback: plain numeric literal, else carry the previous frame's value.
                series[f] = float(formula) if formula.replace('.','',1).isdigit() else (series[f-1] if f>0 else 0.0)
    return series
32
 
33
def interpolate_prompts(pipe, prompt_dict, max_frames):
    """Blends CLIP embeddings between keyframes for smooth conceptual transitions.

    Args:
        pipe: diffusers-style pipeline exposing `tokenizer`, `text_encoder`
            and `device` (assumed interface — not visible here; confirm at caller).
        prompt_dict: mapping of frame index -> prompt string.
        max_frames: number of frames to produce embeddings for.

    Returns:
        List of length `max_frames` of text-encoder embedding tensors,
        linearly interpolated between the surrounding keyframe prompts.
    """
    sorted_keys = sorted(prompt_dict.keys())
    # Pre-calculate embeddings for all keyframe prompts (one encoder pass per keyframe).
    key_embs = {}
    for k in sorted_keys:
        tokens = pipe.tokenizer(prompt_dict[k], padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt").input_ids.to(pipe.device)
        with torch.no_grad():
            key_embs[k] = pipe.text_encoder(tokens)[0]

    full_embs = []
    for f in range(max_frames):
        # Find the keyframes surrounding frame f.
        before = [k for k in sorted_keys if k <= f]
        after = [k for k in sorted_keys if k > f]

        if not after:
            # Past the last keyframe: hold its embedding.
            full_embs.append(key_embs[before[-1]])
        elif not before:
            # Before the first keyframe: use the first one.
            full_embs.append(key_embs[after[0]])
        else:
            k1, k2 = before[-1], after[0]
            alpha = (f - k1) / (k2 - k1)
            # Linear interpolation (lerp) between the two embeddings.
            blended = torch.lerp(key_embs[k1], key_embs[k2], alpha)
            full_embs.append(blended)
    return full_embs
60
 
61
def maintain_colors(img, anchor, mode='LAB'):
    # Pre-commit version (replaced in this diff by the extended version below).
    # Shifts each LAB channel of `img` so its mean matches `anchor`'s,
    # reducing color drift between animation frames.
    if mode == 'None' or anchor is None: return img
    img_np, anc_np = np.array(img), np.array(anchor)
    if mode == 'LAB':
        img_lab, anc_lab = cv2.cvtColor(img_np, cv2.COLOR_RGB2LAB), cv2.cvtColor(anc_np, cv2.COLOR_RGB2LAB)
        for i in range(3):
            # Mean-shift the channel toward the anchor; assignment back into a
            # uint8 array truncates the float result.
            img_lab[:,:,i] = np.clip(img_lab[:,:,i] - np.mean(img_lab[:,:,i]) + np.mean(anc_lab[:,:,i]), 0, 255)
        return Image.fromarray(cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB))
    # Any other mode is a no-op in this version.
    return img
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
def anim_frame_warp_2d(img, args, mode='Reflect'):
    # Pre-commit version (replaced in this diff by the expanded version below).
    # Applies a 2D affine warp (zoom, rotation, pan) to a PIL image.
    cv_img = np.array(img)
    h, w = cv_img.shape[:2]
    # Rotation + uniform scale about the image centre.
    mat = cv2.getRotationMatrix2D((w//2, h//2), args.get('angle',0), args.get('zoom',1))
    # Add the pan (translation) components directly to the affine matrix.
    mat[0, 2] += args.get('tx',0); mat[1, 2] += args.get('ty',0)
    # NOTE(review): an unknown mode raises KeyError here — the replacement
    # version introduces get_border_mode() with a safe default.
    b = {'Reflect':cv2.BORDER_REFLECT_101, 'Replicate':cv2.BORDER_REPLICATE, 'Wrap':cv2.BORDER_WRAP}[mode]
    return Image.fromarray(cv2.warpAffine(cv_img, mat, (w, h), borderMode=b))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  from PIL import Image
7
 
8
def parse_weight_string(string, max_frames):
    """
    Parses complex Deforum weight strings with math support (sin, cos, t).

    A schedule like "0:(1.0), 30:(sin(t/10))" is expanded into a numpy array
    of length `max_frames`, where each keyframe's formula holds from its frame
    up to (not including) the next keyframe.

    Args:
        string: the schedule string; whitespace is ignored, values may be
            wrapped in parentheses. Malformed entries are skipped.
        max_frames: total number of frames to evaluate.

    Returns:
        np.ndarray of shape (max_frames,) with the evaluated per-frame weights.
    """
    max_frames = int(max_frames)
    # Strip ALL whitespace so "0: (1), 30: (2)" parses like "0:(1),30:(2)".
    string = re.sub(r'\s+', '', str(string))
    keyframes = {}

    for part in string.split(','):
        try:
            if ':' not in part:
                continue
            f_str, v_str = part.split(':', 1)
            keyframes[int(f_str)] = v_str.strip('()')
        except ValueError:
            # Non-integer frame index — skip the entry, keep the rest.
            continue

    # Guarantee a value at frame 0 so the first segment always has a formula.
    if 0 not in keyframes:
        keyframes[0] = "0"

    series = np.zeros(max_frames)
    sorted_keys = sorted(keyframes.keys())

    for i, key in enumerate(sorted_keys):
        seg_end = sorted_keys[i + 1] if i + 1 < len(sorted_keys) else max_frames
        formula = keyframes[key]
        # Clamp the segment to the valid frame range: previously a keyframe
        # index >= max_frames overran the array (IndexError) and a negative
        # one silently indexed from the end.
        for f in range(max(key, 0), min(seg_end, max_frames)):
            try:
                val = numexpr.evaluate(formula, local_dict={'t': f, 'pi': np.pi, 'sin': np.sin, 'cos': np.cos, 'tan': np.tan, 'abs': np.abs})
                series[f] = float(val)
            except Exception:
                # numexpr failed (or is unavailable): accept a plain numeric
                # literal, otherwise carry the previous frame's value forward.
                try:
                    series[f] = float(formula)
                except (TypeError, ValueError):
                    series[f] = series[f - 1] if f > 0 else 0.0

    return series
43
 
44
def get_border_mode(mode_str):
    """Map a user-facing border-mode name to its OpenCV border constant.

    'Reflect' and any unrecognized name resolve to cv2.BORDER_REFLECT_101.
    """
    if mode_str == 'Replicate':
        return cv2.BORDER_REPLICATE
    if mode_str == 'Wrap':
        return cv2.BORDER_WRAP
    if mode_str == 'Black':
        return cv2.BORDER_CONSTANT
    # 'Reflect' and the fallback share the same constant.
    return cv2.BORDER_REFLECT_101
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
def maintain_colors(image, anchor, mode='LAB'):
    """
    Matches the color distribution of 'image' to 'anchor'.

    Each selected channel is mean-shifted toward the anchor's channel mean
    in the chosen color space ('LAB', 'HSV', or 'RGB'). 'None', a missing
    anchor, or an unknown mode returns the image unchanged.
    """
    if mode == 'None' or anchor is None:
        return image

    src = np.array(image).astype(np.uint8)
    ref = np.array(anchor).astype(np.uint8)

    def _match_means(target, reference, channels):
        # Shift each channel's mean to the reference; writing the clipped
        # float result back into the uint8 array truncates it.
        for ch in channels:
            target[:, :, ch] = np.clip(
                target[:, :, ch] - target[:, :, ch].mean() + reference[:, :, ch].mean(),
                0, 255)
        return target

    if mode == 'LAB':
        matched = _match_means(cv2.cvtColor(src, cv2.COLOR_RGB2LAB),
                               cv2.cvtColor(ref, cv2.COLOR_RGB2LAB),
                               range(3))
        out = cv2.cvtColor(matched, cv2.COLOR_LAB2RGB)
    elif mode == 'HSV':
        # Match S and V only; hue is left untouched to preserve the palette.
        matched = _match_means(cv2.cvtColor(src, cv2.COLOR_RGB2HSV),
                               cv2.cvtColor(ref, cv2.COLOR_RGB2HSV),
                               (1, 2))
        out = cv2.cvtColor(matched, cv2.COLOR_HSV2RGB)
    elif mode == 'RGB':
        out = _match_means(src, ref, range(3))
    else:
        return image

    return Image.fromarray(out)
85
 
86
def anim_frame_warp_2d(prev_img, args, border_mode_str):
    """
    Applies 2D affine transformation (Zoom, Rotate, Pan).

    Args:
        prev_img: PIL image to warp, or None (returned as-is).
        args: dict with optional 'angle', 'zoom', 'tx', 'ty' keys.
        border_mode_str: border-mode name resolved via get_border_mode().

    Returns:
        The warped frame as a PIL image, or None if prev_img is None.
    """
    if prev_img is None:
        return None

    frame = np.array(prev_img)
    height, width = frame.shape[:2]

    # Rotation + uniform scale about the image centre...
    matrix = cv2.getRotationMatrix2D(
        (width // 2, height // 2),
        args.get('angle', 0),
        args.get('zoom', 1.0),
    )
    # ...then pan by adding the translation to the affine matrix.
    matrix[0, 2] += args.get('tx', 0)
    matrix[1, 2] += args.get('ty', 0)

    warped = cv2.warpAffine(
        frame, matrix, (width, height),
        borderMode=get_border_mode(border_mode_str),
    )
    return Image.fromarray(warped)
108
+
109
def add_noise(img, noise_amt):
    """Blend zero-mean Gaussian noise into a PIL image.

    `noise_amt` scales the noise standard deviation as a fraction of 255;
    values <= 0 return the input unchanged. Output is clipped to uint8 range.
    """
    if noise_amt <= 0:
        return img
    base = np.array(img).astype(np.float32)
    gauss = np.random.normal(0, noise_amt * 255, base.shape).astype(np.float32)
    blended = np.clip(base + gauss, 0, 255).astype(np.uint8)
    return Image.fromarray(blended)