diff --git a/.gitattributes b/.gitattributes index 324557ac4cb15f98e8c09825ac4750eb8b1e516a..c23a0fd72ef29b65aa835b148717a3dff53d3fac 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1825,3 +1825,4 @@ parrot/lib/python3.10/site-packages/opencv_python.libs/libavutil-a0a0531e.so.57. parrot/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +videollama2/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/__pycache__/VideoClip.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/__pycache__/VideoClip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9737df93b410d6d52c2971c9d939c2cda6fb0a57 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/__pycache__/VideoClip.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/__pycache__/__init__.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5efb0da870fb31985f28315947144bcbb2e2ca6c Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/__pycache__/__init__.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/__init__.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..942f23f8bf13525be9fffa445d4813c104b17ec2 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/__init__.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/on_color.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/on_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f342997acdef81402604ff345c9e3709ad9c913d Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/on_color.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/positioning.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/positioning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f5e7cfd100bae09c57ef68baa29fa431454a3c7 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/positioning.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/transitions.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/transitions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0125cdcb9d2ab7bc1c52afc5b6f76c3157e82a4 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/__pycache__/transitions.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/on_color.py b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/on_color.py new file mode 100644 index 0000000000000000000000000000000000000000..c5fcb147bd36f320833af44386b346b589ff7d8d --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/on_color.py @@ -0,0 +1,27 @@ +from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip +from moviepy.video.VideoClip import ColorClip + + +def on_color(clip, size=None, color=(0, 0, 0), pos=None, col_opacity=None): + """ + Returns a clip made of the current clip overlaid on a color + clip of a possibly bigger size. Can serve to flatten transparent + clips (ideal for previewing clips with masks). + + :param size: size of the final clip. By default it will be the + size of the current clip. + :param bg_color: the background color of the final clip + :param pos: the position of the clip in the final clip. + :param col_opacity: should the added zones be transparent ? + """ + + if size is None: + size = clip.size + if pos is None: + pos = 'center' + colorclip = ColorClip(size, color=color) + if col_opacity: + colorclip = colorclip.with_mask().set_opacity(col_opacity) + + return CompositeVideoClip([colorclip, clip.set_position(pos)], + transparent=(col_opacity is not None)) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/positioning.py b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/positioning.py new file mode 100644 index 0000000000000000000000000000000000000000..910e48b51f3f6ef61293740cca1e904db5bc8d9d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/compositing/positioning.py @@ -0,0 +1,5 @@ +""" +This module provides classes that make positioning easy +""" + +# class ClipPosition: diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__init__.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67fe3b194e96e613d1db34848bc081d228b4693f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__init__.py @@ -0,0 +1,4 @@ +""" +This module contains transformation functions 
(clip->clip) +One file for one fx. The file's name is the fx's name +""" diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/freeze.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/freeze.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cf8774c839d4b5ea1e2cbf0d4491422868b1688 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/freeze.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/headblur.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/headblur.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3925697b46ea35e16de64ba0521d3721ced9f9c Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/headblur.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/painting.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/painting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffc982a3af9242c3985cf2b5913fdcffd6115d1d Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/painting.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/scroll.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/scroll.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..111fc2e9c53dfc4f2d484de058ba3354d1b8cfc9 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/scroll.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/accel_decel.py 
b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/accel_decel.py new file mode 100644 index 0000000000000000000000000000000000000000..6fbb4cca5f5ee0a9567e0b9cc3b3e0802d80322c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/accel_decel.py @@ -0,0 +1,44 @@ +def f_accel_decel(t, old_d, new_d, abruptness=1, soonness=1.0): + """ + abruptness + negative abruptness (>-1): speed up down up + zero abruptness : no effect + positive abruptness: speed down up down + + soonness + for positive abruptness, determines how soon the + speedup occurs (0=.5)*f2(t) + + return old_d*_f((t/new_d)**soonness) + + +def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0): + """ + + new_duration + If None, will be that of the current clip. + + abruptness + negative abruptness (>-1): speed up down up + zero abruptness : no effect + positive abruptness: speed down up down + + soonness + for positive abruptness, determines how soon the + speedup occurs (0>> crop(clip, x1=50, y1=60, x2=460, y2=275) + + Only remove the part above y=30: + + >>> crop(clip, y1=30) + + Crop a rectangle that starts 10 pixels left and is 200px wide + + >>> crop(clip, x1=10, width=200) + + Crop a rectangle centered in x,y=(300,400), width=50, height=150 : + + >>> crop(clip, x_center=300 , y_center=400, + width=50, height=150) + + Any combination of the above should work, like for this rectangle + centered in x=300, with explicit y-boundaries: + + >>> crop(x_center=300, width=400, y1=100, y2=600) + + """ + + if width and x1 is not None: + x2 = x1 + width + elif width and x2 is not None: + x1 = x2 - width + + if height and y1 is not None: + y2 = y1 + height + elif height and y2 is not None: + y1 = y2 - height + + if x_center: + x1, x2 = x_center - width / 2, x_center + width / 2 + + if y_center: + y1, y2 = y_center - height / 2, y_center + height / 2 + + x1 = x1 or 0 + y1 = y1 or 0 + x2 = x2 or clip.size[0] + y2 = y2 or clip.size[1] + + return clip.fl_image(lambda pic: 
pic[int(y1) : int(y2), int(x1) : int(x2)], apply_to=["mask"]) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/even_size.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/even_size.py new file mode 100644 index 0000000000000000000000000000000000000000..c7290b8e29e75d8661659a11794eb874406520a2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/even_size.py @@ -0,0 +1,22 @@ +from moviepy.decorators import apply_to_mask + + +@apply_to_mask +def even_size(clip): + """ + Crops the clip to make dimensions even. + """ + w, h = clip.size + w_even = w % 2 == 0 + h_even = h % 2 == 0 + if w_even and h_even: + return clip + + if not w_even and not h_even: + fl_image = lambda a : a[:-1,:-1,:] + elif w_even: + fl_image = lambda a : a[:,:-1,:] + else: + fl_image = lambda a : a[:-1,:,:] + + return clip.fl_image(fl_image) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/fadein.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/fadein.py new file mode 100644 index 0000000000000000000000000000000000000000..c9fda9254746a2b890aaeed89579a386f64deed8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/fadein.py @@ -0,0 +1,25 @@ +import numpy as np + + +def fadein(clip, duration, initial_color=None): + """ + Makes the clip progressively appear from some color (black by default), + over ``duration`` seconds at the beginning of the clip. Can be used for + masks too, where the initial color must be a number between 0 and 1. 
+ For cross-fading (progressive appearance or disappearance of a clip + over another clip, see ``composition.crossfade`` + """ + + if initial_color is None: + initial_color = 0 if clip.ismask else [0,0,0] + + initial_color = np.array(initial_color) + + def fl(gf, t): + if t>=duration: + return gf(t) + else: + fading = (1.0*t/duration) + return fading*gf(t) + (1-fading)*initial_color + + return clip.fl(fl) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/fadeout.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/fadeout.py new file mode 100644 index 0000000000000000000000000000000000000000..66c7230a8e91ee04672c7995730578743486e453 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/fadeout.py @@ -0,0 +1,28 @@ +import numpy as np + +from moviepy.decorators import requires_duration + + +@requires_duration +def fadeout(clip, duration, final_color=None): + """ + Makes the clip progressively fade to some color (black by default), + over ``duration`` seconds at the end of the clip. Can be used for + masks too, where the final color must be a number between 0 and 1. 
+ For cross-fading (progressive appearance or disappearance of a clip + over another clip, see ``composition.crossfade`` + """ + + if final_color is None: + final_color = 0 if clip.ismask else [0,0,0] + + final_color = np.array(final_color) + + def fl(gf, t): + if (clip.duration-t)>=duration: + return gf(t) + else: + fading = 1.0 * (clip.duration - t) / duration + return fading*gf(t) + (1-fading)*final_color + + return clip.fl(fl) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/gamma_corr.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/gamma_corr.py new file mode 100644 index 0000000000000000000000000000000000000000..5668698ad87341cb14423a66824a2fe025fb090f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/gamma_corr.py @@ -0,0 +1,8 @@ + +def gamma_corr(clip, gamma): + """ Gamma-correction of a video clip """ + def fl(im): + corrected = (255*(1.0*im/255)**gamma) + return corrected.astype('uint8') + + return clip.fl_image(fl) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/headblur.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/headblur.py new file mode 100644 index 0000000000000000000000000000000000000000..2d46d5a77dbb14c9ef236c1a0e9a939c1733014a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/headblur.py @@ -0,0 +1,58 @@ +import numpy as np + +#------- CHECKING DEPENDENCIES ----------------------------------------- +try: + import cv2 + headblur_possible = True + if cv2.__version__ >= '3.0.0': + cv2.CV_AA=cv2.LINE_AA +except: + headblur_possible = False +#----------------------------------------------------------------------- + + +def headblur(clip,fx,fy,r_zone,r_blur=None): + """ + Returns a filter that will blurr a moving part (a head ?) of + the frames. The position of the blur at time t is + defined by (fx(t), fy(t)), the radius of the blurring + by ``r_zone`` and the intensity of the blurring by ``r_blur``. 
+ Requires OpenCV for the circling and the blurring. + Automatically deals with the case where part of the image goes + offscreen. + """ + + if r_blur is None: r_blur = 2*r_zone/3 + + def fl(gf,t): + + im = gf(t) + h,w,d = im.shape + x,y = int(fx(t)),int(fy(t)) + x1,x2 = max(0,x-r_zone),min(x+r_zone,w) + y1,y2 = max(0,y-r_zone),min(y+r_zone,h) + region_size = y2-y1,x2-x1 + + mask = np.zeros(region_size).astype('uint8') + cv2.circle(mask, (r_zone,r_zone), r_zone, 255, -1, + lineType=cv2.CV_AA) + + mask = np.dstack(3*[(1.0/255)*mask]) + + orig = im[y1:y2, x1:x2] + blurred = cv2.blur(orig,(r_blur, r_blur)) + im[y1:y2, x1:x2] = mask*blurred + (1-mask)*orig + return im + + return clip.fl(fl) + + + +#------- OVERWRITE IF REQUIREMENTS NOT MET ----------------------------- +if not headblur_possible: + doc = headblur.__doc__ + def headblur(clip,fx,fy,r_zone,r_blur=None): + raise IOError("fx painting needs opencv") + + headblur.__doc__ = doc +#----------------------------------------------------------------------- diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/invert_colors.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/invert_colors.py new file mode 100644 index 0000000000000000000000000000000000000000..0fb948eb39151b34e71185db61cf00e9400d9d62 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/invert_colors.py @@ -0,0 +1,8 @@ +def invert_colors(clip): + """ Returns the color-inversed clip. + + The values of all pixels are replaced with (255-v) or (1-v) for masks + Black becomes white, green becomes purple, etc. 
+ """ + maxi = (1.0 if clip.ismask else 255) + return clip.fl_image(lambda f : maxi - f) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/loop.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/loop.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c04beb3496050e72785f23aa33810790b015ec --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/loop.py @@ -0,0 +1,26 @@ +from moviepy.decorators import apply_to_audio, apply_to_mask, requires_duration + + +@requires_duration +@apply_to_mask +@apply_to_audio +def loop(self, n=None, duration=None): + """ + Returns a clip that plays the current clip in an infinite loop. + Ideal for clips coming from gifs. + + Parameters + ------------ + n + Number of times the clip should be played. If `None` the + the clip will loop indefinitely (i.e. with no set duration). + + duration + Total duration of the clip. Can be specified instead of n. + """ + result = self.fl_time(lambda t: t % self.duration) + if n: + duration = n*self.duration + if duration: + result = result.set_duration(duration) + return result diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/lum_contrast.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/lum_contrast.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ae55cf2000b8d829b33f227fc3c5151fd49134 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/lum_contrast.py @@ -0,0 +1,11 @@ +def lum_contrast(clip, lum = 0, contrast=0, contrast_thr=127): + """ luminosity-contrast correction of a clip """ + + def fl_image(im): + im = 1.0*im # float conversion + corrected = im + lum + contrast*(im-float(contrast_thr)) + corrected[corrected < 0] = 0 + corrected[corrected > 255] = 255 + return corrected.astype('uint8') + + return clip.fl_image(fl_image) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/make_loopable.py 
b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/make_loopable.py new file mode 100644 index 0000000000000000000000000000000000000000..485887c7f9804556dfc5c811d0ea5b5798f6a205 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/make_loopable.py @@ -0,0 +1,14 @@ +import moviepy.video.compositing.transitions as transfx +from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip + + +def make_loopable(clip, cross): + """ + Makes the clip fade in progressively at its own end, this way + it can be looped indefinitely. ``cross`` is the duration in seconds + of the fade-in. """ + d = clip.duration + clip2 = clip.fx(transfx.crossfadein, cross).\ + set_start(d - cross) + return CompositeVideoClip([ clip, clip2 ]).\ + subclip(cross,d) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/margin.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/margin.py new file mode 100644 index 0000000000000000000000000000000000000000..20dd1930e14fcae8b39e318a435e63a02b46882e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/margin.py @@ -0,0 +1,58 @@ +import numpy as np + +from moviepy.decorators import apply_to_mask +from moviepy.video.VideoClip import ImageClip + + +@apply_to_mask +def margin(clip, mar=None, left=0, right=0, top=0, + bottom=0, color=(0, 0, 0), opacity = 1.0): + """ + Draws an external margin all around the frame. + + :param mar: if not ``None``, then the new clip has a margin of + size ``mar`` in pixels on the left, right, top, and bottom. + + :param left, right, top, bottom: width of the margin in pixel + in these directions. + + :param color: color of the margin. + + :param mask_margin: value of the mask on the margin. Setting + this value to 0 yields transparent margins. 
+ + """ + + if (opacity != 1.0) and (clip.mask is None) and not (clip.ismask): + clip = clip.add_mask() + + if mar is not None: + left = right = top = bottom = mar + + def make_bg(w,h): + new_w, new_h = w + left + right, h + top + bottom + if clip.ismask: + shape = (new_h, new_w) + bg = ( np.tile(opacity, (new_h, new_w)) + .astype(float) + .reshape(shape)) + else: + shape = (new_h, new_w, 3) + bg = np.tile(color, (new_h, new_w)).reshape(shape) + return bg + + if isinstance(clip, ImageClip): + + im = make_bg(clip.w,clip.h) + im[top:top + clip.h, left:left + clip.w] = clip.img + return clip.fl_image(lambda pic:im) + + else: + + def fl(gf, t): + pic = gf(t) + h,w = pic.shape[:2] + im = make_bg(w,h) + im[top:top + h, left:left + w] = pic + return im + return clip.fl(fl) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_and.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_and.py new file mode 100644 index 0000000000000000000000000000000000000000..ccec602ab315efac909e6a109e4574f2c4f3a751 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_and.py @@ -0,0 +1,20 @@ +import numpy as np + +from ..VideoClip import ImageClip + + +def mask_and(clip, other_clip): + """ Returns the logical 'and' (min) between two masks. + other_clip can be a mask clip or a picture (np.array). + The result has the duration of 'clip' (if it has any) + """ + + # To ensure that 'or' of two ImageClips will be an ImageClip. 
+ if isinstance(other_clip, ImageClip): + other_clip = other_clip.img + + if isinstance(other_clip, np.ndarray): + return clip.fl_image(lambda f : np.minimum(f, other_clip)) + else: + return clip.fl(lambda gf, t : np.minimum(gf(t), + other_clip.get_frame(t))) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_or.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_or.py new file mode 100644 index 0000000000000000000000000000000000000000..abe5e74f4f997296b7082209c1ebdbbd9399a5a7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_or.py @@ -0,0 +1,20 @@ +import numpy as np + +from ..VideoClip import ImageClip + + +def mask_or(clip, other_clip): + """ Returns the logical 'or' (max) between two masks. + other_clip can be a mask clip or a picture (np.array). + The result has the duration of 'clip' (if it has any) + """ + + # To ensure that 'or' of two ImageClips will be an ImageClip. + if isinstance(other_clip, ImageClip): + other_clip = other_clip.img + + if isinstance(other_clip, np.ndarray): + return clip.fl_image(lambda f : np.maximum(f, other_clip)) + else: + return clip.fl(lambda gf, t : np.maximum(gf(t), + other_clip.get_frame(t))) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mirror_x.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mirror_x.py new file mode 100644 index 0000000000000000000000000000000000000000..3173dac5c123e50b9cda9c51d22de07f5d1800af --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mirror_x.py @@ -0,0 +1,4 @@ + +def mirror_x(clip, apply_to= "mask"): + """ flips the clip horizontally (and its mask too, by default) """ + return clip.fl_image(lambda f: f[:,::-1], apply_to = apply_to) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mirror_y.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mirror_y.py new file mode 100644 index 
0000000000000000000000000000000000000000..0a602e9fc5727b9d9975373ab694e243f8f2340a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/mirror_y.py @@ -0,0 +1,3 @@ +def mirror_y(clip, apply_to= "mask"): + """ flips the clip vertically (and its mask too, by default) """ + return clip.fl_image(lambda f : f[::-1], apply_to = apply_to) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/painting.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/painting.py new file mode 100644 index 0000000000000000000000000000000000000000..b41bac30d8c5112d4bed3e0beeb40f2aa67c7b00 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/painting.py @@ -0,0 +1,43 @@ +#------- CHECKING DEPENDENCIES ----------------------------------------- +painting_possible = True +try: + from skimage.filter import sobel +except: + try: + from scipy.ndimage.filters import sobel + except: + painting_possible = False +#----------------------------------------------------------------------- + + + +import numpy as np + + +def to_painting(image,saturation = 1.4,black = 0.006): + """ transforms any photo into some kind of painting """ + edges = sobel(image.mean(axis=2)) + darkening = black*(255*np.dstack(3*[edges])) + painting = saturation*image-darkening + return np.maximum(0,np.minimum(255,painting)).astype('uint8') + +def painting(clip, saturation = 1.4,black = 0.006): + """ + Transforms any photo into some kind of painting. Saturation + tells at which point the colors of the result should be + flashy. ``black`` gives the anount of black lines wanted. + Requires Scikit-image or Scipy installed. 
+ """ + return clip.fl_image(lambda im : to_painting(im,saturation,black)) + + + +#------- OVERWRITE IF REQUIREMENTS NOT MET ----------------------------- + +if not painting_possible: + doc = painting.__doc__ + def painting(clip, newsize=None, height=None, width=None): + raise IOError("fx painting needs scikit-image or scipy") + + painting.__doc__ = doc +#----------------------------------------------------------------------- diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/rotate.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/rotate.py new file mode 100644 index 0000000000000000000000000000000000000000..da22268d03db51071131d21a90c01877e7b7280f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/rotate.py @@ -0,0 +1,73 @@ +import numpy as np + +from moviepy.decorators import apply_to_mask + +try: + from PIL import Image + PIL_FOUND = True + def pil_rotater(pic, angle, resample, expand): + return np.array( Image.fromarray(pic).rotate(angle, expand=expand, + resample=resample)) +except ImportError: + PIL_FOUND = False + +def rotate(clip, angle, unit='deg', resample="bicubic", expand=True): + """ + Change unit to 'rad' to define angles as radians. + If the angle is not one of 90, 180, -90, -180 (degrees) there will be + black borders. You can make them transparent with + + >>> newclip = clip.add_mask().rotate(72) + + Parameters + =========== + + clip + A video clip + + angle + Either a value or a function angle(t) representing the angle of rotation + + unit + Unit of parameter `angle` (either `deg` for degrees or `rad` for radians) + + resample + One of "nearest", "bilinear", or "bicubic". 
+ + expand + Only applIf False, the clip will maintain the same True, the clip will be resized so that the whole + """ + + resample = {"bilinear": Image.BILINEAR, + "nearest": Image.NEAREST, + "bicubic": Image.BICUBIC}[resample] + + if not hasattr(angle, '__call__'): + # if angle is a constant, convert to a constant function + a = +angle + angle = lambda t: a + + transpo = [1,0] if clip.ismask else [1,0,2] + + def fl(gf, t): + + a = angle(t) + im = gf(t) + + if unit == 'rad': + a = 360.0*a/(2*np.pi) + + if (a==90) and expand: + return np.transpose(im, axes=transpo)[::-1] + elif (a==-90) and expand: + return np.transpose(im, axes=transpo)[:,::-1] + elif (a in [180, -180]) and expand: + return im[::-1,::-1] + elif not PIL_FOUND: + raise ValueError('Without "Pillow" installed, only angles 90, -90,' + '180 are supported, please install "Pillow" with' + "pip install pillow") + else: + return pil_rotater(im, a, resample=resample, expand=expand) + + return clip.fl(fl, apply_to=["mask"]) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/scroll.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/scroll.py new file mode 100644 index 0000000000000000000000000000000000000000..34f4f40125b0a8bc3796e02aa48c4c4d1ad1f893 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/scroll.py @@ -0,0 +1,16 @@ +def scroll(clip, h=None, w=None, x_speed=0, y_speed=0, + x_start=0, y_start=0, apply_to="mask"): + """ Scrolls horizontally or vertically a clip, e.g. 
to make end + credits """ + if h is None: h = clip.h + if w is None: w = clip.w + + xmax = clip.w-w-1 + ymax = clip.h-h-1 + + def f(gf,t): + x = int(max(0, min(xmax, x_start+ round(x_speed*t)))) + y = int(max(0, min(ymax, y_start+ round(y_speed*t)))) + return gf(t)[y:y+h, x:x+w] + + return clip.fl(f, apply_to = apply_to) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/speedx.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/speedx.py new file mode 100644 index 0000000000000000000000000000000000000000..1c6b82cff2b842a52c1427a1e8bc5d32b14b18c6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/speedx.py @@ -0,0 +1,21 @@ +from moviepy.decorators import apply_to_audio, apply_to_mask + + +def speedx(clip, factor = None, final_duration=None): + """ + Returns a clip playing the current clip but at a speed multiplied + by ``factor``. Instead of factor one can indicate the desired + ``final_duration`` of the clip, and the factor will be automatically + computed. + The same effect is applied to the clip's audio and mask if any. + """ + + if final_duration: + factor = 1.0* clip.duration / final_duration + + newclip = clip.fl_time(lambda t: factor * t, apply_to=['mask', 'audio']) + + if clip.duration is not None: + newclip = newclip.set_duration(1.0 * clip.duration / factor) + + return newclip diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/fx/supersample.py b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/supersample.py new file mode 100644 index 0000000000000000000000000000000000000000..93b9d7a43e84c4f018858f9b2c1c225745034992 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/fx/supersample.py @@ -0,0 +1,13 @@ +import numpy as np + + +def supersample(clip, d, nframes): + """ Replaces each frame at time t by the mean of `nframes` equally spaced frames + taken in the interval [t-d, t+d]. 
class VideoFileClip(VideoClip):

    """
    A video clip originating from a movie file. For instance: ::

        >>> clip = VideoFileClip("myHolidays.mp4")
        >>> clip.close()
        >>> with VideoFileClip("myMaskVideo.avi") as clip2:
        >>>     pass  # Implicit close called by context manager.


    Parameters
    ------------

    filename:
      The name of the video file. It can have any extension supported
      by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.

    has_mask:
      Set this to 'True' if there is a mask included in the videofile.
      Video files rarely contain masks, but some video codecs enable
      that. For instance if you have a MoviePy VideoClip with a mask you
      can save it to a videofile with a mask. (see also
      ``VideoClip.write_videofile`` for more details).

    audio:
      Set to `False` if the clip doesn't have any audio or if you do not
      wish to read the audio.

    target_resolution:
      Set to (desired_height, desired_width) to have ffmpeg resize the frames
      before returning them. This is much faster than streaming in high-res
      and then resizing. If either dimension is None, the frames are resized
      by keeping the existing aspect ratio.

    resize_algorithm:
      The algorithm used for resizing. Default: "bicubic", other popular
      options include "bilinear" and "fast_bilinear". For more information, see
      https://ffmpeg.org/ffmpeg-scaler.html

    fps_source:
      The fps value to collect from the metadata. Set by default to 'tbr', but
      can be set to 'fps', which may be helpful if importing slow-motion videos
      that get messed up otherwise.


    Attributes
    -----------

    filename:
      Name of the original video file.

    fps:
      Frames per second in the original file.


    Read docs for Clip() and VideoClip() for other, more generic, attributes.

    Lifetime
    --------

    Note that this creates subprocesses and locks files. If you construct one
    of these instances, you must call close() afterwards, or the subresources
    will not be cleaned up until the process ends.

    If copies are made, and close() is called on one, it may cause methods on
    the other copies to fail.
    """

    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize=200000,
                 target_resolution=None, resize_algorithm='bicubic',
                 audio_fps=44100, audio_nbytes=2, verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader. When a mask is expected, the alpha channel is
        # needed, so ffmpeg is asked for RGBA frames instead of RGB.
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:

            # Split the RGBA frame: RGB goes to the main clip, the alpha
            # channel (rescaled from 0..255 to 0..1 floats) becomes the
            # mask clip. Both lambdas share the single underlying reader.
            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                         .set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any. Note: when audio=False or no
        # audio stream exists, ``self.audio`` is never assigned here.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)

    def close(self):
        """ Close the internal reader (video subprocess and, if present,
        the audio reader). Safe to call more than once. """
        if self.reader:
            self.reader.close()
            self.reader = None

        # ``self.audio`` may never have been set (audio=False, or the file
        # has no audio stream), hence the AttributeError guard.
        try:
            if self.audio:
                self.audio.close()
                self.audio = None
        except AttributeError:
            pass
--git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/gif_writers.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/gif_writers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a9a100a3fe347534ddd43994cc46337dafcb5f2 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/gif_writers.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/html_tools.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/html_tools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db748862be40af37fdb182882058df31db6590ae Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/html_tools.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/preview.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/preview.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a75de482570caf9abe6854c45e644eb1d0394ea Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/preview.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/sliders.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/sliders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9630ba030bf59daf61d902ea5314aef65709079 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/io/__pycache__/sliders.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/bindings.py b/videollama2/lib/python3.10/site-packages/moviepy/video/io/bindings.py new file mode 100644 index 
0000000000000000000000000000000000000000..4b206f75d943cccb3ca6370e2dee0ec4b184dc1b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/io/bindings.py @@ -0,0 +1,32 @@ +""" +This module implements all the functions to communicate with other Python +modules (PIL, matplotlib, mayavi, etc.) +""" + +import numpy as np + + +def PIL_to_npimage(im): + """ Transforms a PIL/Pillow image into a numpy RGB(A) image. + Actually all this do is returning numpy.array(im).""" + return np.array(im) + #w,h = im.size + #d = (4 if im.mode=="RGBA" else 3) + #return +np.frombuffer(im.tobytes(), dtype='uint8').reshape((h,w,d)) + + +def mplfig_to_npimage(fig): + """ Converts a matplotlib figure to a RGB frame after updating the canvas""" + # only the Agg backend now supports the tostring_rgb function + from matplotlib.backends.backend_agg import FigureCanvasAgg + canvas = FigureCanvasAgg(fig) + canvas.draw() # update/draw the elements + + # get the width and the height to resize the matrix + l,b,w,h = canvas.figure.bbox.bounds + w, h = int(w), int(h) + + # exports the canvas to a string buffer and then to a numpy nd.array + buf = canvas.tostring_rgb() + image= np.frombuffer(buf, dtype=np.uint8) + return image.reshape(h,w,3) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/ffmpeg_reader.py b/videollama2/lib/python3.10/site-packages/moviepy/video/io/ffmpeg_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..7ef5b2d1848418b48ffeb429f1af072d2fe2fd4a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/io/ffmpeg_reader.py @@ -0,0 +1,393 @@ +""" +This module implements all the functions to read a video or a picture +using ffmpeg. 
It is quite ugly, as there are many pitfalls to avoid +""" + +from __future__ import division + +import logging +import os +import re +import subprocess as sp +import warnings + +import numpy as np + +from moviepy.compat import DEVNULL, PY3 +from moviepy.config import get_setting # ffmpeg, ffmpeg.exe, etc... +from moviepy.tools import cvsecs + +logging.captureWarnings(True) + + + + + +class FFMPEG_VideoReader: + + def __init__(self, filename, print_infos=False, bufsize = None, + pix_fmt="rgb24", check_duration=True, + target_resolution=None, resize_algo='bicubic', + fps_source='tbr'): + + self.filename = filename + self.proc = None + infos = ffmpeg_parse_infos(filename, print_infos, check_duration, + fps_source) + self.fps = infos['video_fps'] + self.size = infos['video_size'] + self.rotation = infos['video_rotation'] + + if target_resolution: + # revert the order, as ffmpeg used (width, height) + target_resolution = target_resolution[1], target_resolution[0] + + if None in target_resolution: + ratio = 1 + for idx, target in enumerate(target_resolution): + if target: + ratio = target / self.size[idx] + self.size = (int(self.size[0] * ratio), int(self.size[1] * ratio)) + else: + self.size = target_resolution + self.resize_algo = resize_algo + + self.duration = infos['video_duration'] + self.ffmpeg_duration = infos['duration'] + self.nframes = infos['video_nframes'] + + self.infos = infos + + self.pix_fmt = pix_fmt + self.depth = 4 if pix_fmt == 'rgba' else 3 + + if bufsize is None: + w, h = self.size + bufsize = self.depth * w * h + 100 + + self.bufsize= bufsize + self.initialize() + + + self.pos = 1 + self.lastread = self.read_frame() + + + def initialize(self, starttime=0): + """Opens the file, creates the pipe. 
""" + + self.close() # if any + + if starttime != 0 : + offset = min(1, starttime) + i_arg = ['-ss', "%.06f" % (starttime - offset), + '-i', self.filename, + '-ss', "%.06f" % offset] + else: + i_arg = [ '-i', self.filename] + + cmd = ([get_setting("FFMPEG_BINARY")] + i_arg + + ['-loglevel', 'error', + '-f', 'image2pipe', + '-vf', 'scale=%d:%d' % tuple(self.size), + '-sws_flags', self.resize_algo, + "-pix_fmt", self.pix_fmt, + '-vcodec', 'rawvideo', '-']) + popen_params = {"bufsize": self.bufsize, + "stdout": sp.PIPE, + "stderr": sp.PIPE, + "stdin": DEVNULL} + + if os.name == "nt": + popen_params["creationflags"] = 0x08000000 + + self.proc = sp.Popen(cmd, **popen_params) + + + def skip_frames(self, n=1): + """Reads and throws away n frames """ + w, h = self.size + for i in range(n): + self.proc.stdout.read(self.depth*w*h) + #self.proc.stdout.flush() + self.pos += n + + + def read_frame(self): + w, h = self.size + nbytes= self.depth*w*h + + s = self.proc.stdout.read(nbytes) + if len(s) != nbytes: + + warnings.warn("Warning: in file %s, "%(self.filename)+ + "%d bytes wanted but %d bytes read,"%(nbytes, len(s))+ + "at frame %d/%d, at time %.02f/%.02f sec. "%( + self.pos,self.nframes, + 1.0*self.pos/self.fps, + self.duration)+ + "Using the last valid frame instead.", + UserWarning) + + if not hasattr(self, 'lastread'): + raise IOError(("MoviePy error: failed to read the first frame of " + "video file %s. That might mean that the file is " + "corrupted. That may also mean that you are using " + "a deprecated version of FFMPEG. On Ubuntu/Debian " + "for instance the version in the repos is deprecated. 
" + "Please update to a recent version from the website.")%( + self.filename)) + + result = self.lastread + + else: + if hasattr(np, 'frombuffer'): + result = np.frombuffer(s, dtype='uint8') + else: + result = np.fromstring(s, dtype='uint8') + result.shape =(h, w, len(s)//(w*h)) # reshape((h, w, len(s)//(w*h))) + self.lastread = result + + return result + + def get_frame(self, t): + """ Read a file video frame at time t. + + Note for coders: getting an arbitrary frame in the video with + ffmpeg can be painfully slow if some decoding has to be done. + This function tries to avoid fetching arbitrary frames + whenever possible, by moving between adjacent frames. + """ + + # these definitely need to be rechecked sometime. Seems to work. + + # I use that horrible '+0.00001' hack because sometimes due to numerical + # imprecisions a 3.0 can become a 2.99999999... which makes the int() + # go to the previous integer. This makes the fetching more robust in the + # case where you get the nth frame by writing get_frame(n/fps). + + pos = int(self.fps*t + 0.00001)+1 + + # Initialize proc if it is not open + if not self.proc: + self.initialize(t) + self.pos = pos + self.lastread = self.read_frame() + + if pos == self.pos: + return self.lastread + elif (pos < self.pos) or (pos > self.pos + 100): + self.initialize(t) + self.pos = pos + else: + self.skip_frames(pos-self.pos-1) + result = self.read_frame() + self.pos = pos + return result + + def close(self): + if self.proc: + self.proc.terminate() + self.proc.stdout.close() + self.proc.stderr.close() + self.proc.wait() + self.proc = None + if hasattr(self, 'lastread'): + del self.lastread + + def __del__(self): + self.close() + + +def ffmpeg_read_image(filename, with_mask=True): + """ Read an image file (PNG, BMP, JPEG...). + + Wraps FFMPEG_Videoreader to read just one image. + Returns an ImageClip. + + This function is not meant to be used directly in MoviePy, + use ImageClip instead to make clips out of image files. 
def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True,
                       fps_source='tbr'):
    """Get file infos using ffmpeg.

    Returns a dictionnary with the fields:
    "video_found", "video_fps", "duration", "video_nframes",
    "video_duration", "audio_found", "audio_fps"

    "video_duration" is slightly smaller than "duration" to avoid
    fetching the uncomplete frames at the end, which raises an error.

    """

    # Open the file in a pipe, provoke an error, read output.
    # ffmpeg prints the media information on stderr when given an input but
    # no (usable) output, so the metadata is harvested from stderr text.
    is_GIF = filename.endswith('.gif')
    cmd = [get_setting("FFMPEG_BINARY"), "-i", filename]
    if is_GIF:
        # For GIFs, force a full decode so that the frame count appears in
        # the progress output ("frame=..." lines).
        cmd += ["-f", "null", "/dev/null"]

    popen_params = {"bufsize": 10**5,
                    "stdout": sp.PIPE,
                    "stderr": sp.PIPE,
                    "stdin": DEVNULL}

    if os.name == "nt":
        # CREATE_NO_WINDOW: avoid flashing a console window on Windows.
        popen_params["creationflags"] = 0x08000000

    proc = sp.Popen(cmd, **popen_params)
    (output, error) = proc.communicate()
    infos = error.decode('utf8')

    del proc

    if print_infos:
        # print the whole info text returned by FFMPEG
        print(infos)

    lines = infos.splitlines()
    if "No such file or directory" in lines[-1]:
        raise IOError(("MoviePy error: the file %s could not be found!\n"
                       "Please check that you entered the correct "
                       "path.") % filename)

    result = dict()

    # get duration (in seconds)
    result['duration'] = None

    if check_duration:
        try:
            keyword = ('frame=' if is_GIF else 'Duration: ')
            # for large GIFS the "full" duration is presented as the last element in the list.
            index = -1 if is_GIF else 0
            line = [l for l in lines if keyword in l][index]
            # Timestamp of the form HH:MM:SS.cc somewhere on the line.
            match = re.findall("([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])", line)[0]
            result['duration'] = cvsecs(match)
        except:
            raise IOError(("MoviePy error: failed to read the duration of file %s.\n"
                           "Here are the file infos returned by ffmpeg:\n\n%s") % (
                              filename, infos))

    # get the output line that speaks about video
    lines_video = [l for l in lines if ' Video: ' in l and re.search('\d+x\d+', l)]

    result['video_found'] = (lines_video != [])

    if result['video_found']:
        try:
            line = lines_video[0]

            # get the size, of the form 460x320 (w x h)
            match = re.search(" [0-9]*x[0-9]*(,| )", line)
            s = list(map(int, line[match.start():match.end() - 1].split('x')))
            result['video_size'] = s
        except:
            raise IOError(("MoviePy error: failed to read video dimensions in file %s.\n"
                           "Here are the file infos returned by ffmpeg:\n\n%s") % (
                              filename, infos))

        # Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
        # tbc, and sometimes tbc/2...
        # Current policy: Trust tbr first, then fps unless fps_source is
        # specified as 'fps' in which case try fps then tbr

        # If result is near from x*1000/1001 where x is 23,24,25,50,
        # replace by x*1000/1001 (very common case for the fps).

        def get_tbr():
            # Parse the "NN tbr" token from the ffmpeg Video line.
            match = re.search("( [0-9]*.| )[0-9]* tbr", line)

            # Sometimes comes as e.g. 12k. We need to replace that with 12000.
            s_tbr = line[match.start():match.end()].split(' ')[1]
            if "k" in s_tbr:
                tbr = float(s_tbr.replace("k", "")) * 1000
            else:
                tbr = float(s_tbr)
            return tbr

        def get_fps():
            # Parse the "NN fps" token from the ffmpeg Video line.
            match = re.search("( [0-9]*.| )[0-9]* fps", line)
            fps = float(line[match.start():match.end()].split(' ')[1])
            return fps

        if fps_source == 'tbr':
            try:
                result['video_fps'] = get_tbr()
            except:
                result['video_fps'] = get_fps()

        elif fps_source == 'fps':
            try:
                result['video_fps'] = get_fps()
            except:
                result['video_fps'] = get_tbr()

        # It is known that a fps of 24 is often written as 24000/1001
        # but then ffmpeg nicely rounds it to 23.98, which we hate.
        coef = 1000.0 / 1001.0
        fps = result['video_fps']
        for x in [23, 24, 25, 30, 50]:
            if (fps != x) and abs(fps - x * coef) < .01:
                result['video_fps'] = x * coef

        if check_duration:
            # Frame count derived from duration * fps; the +1 rounds up so
            # the final partial frame is counted.
            result['video_nframes'] = int(result['duration'] * result['video_fps']) + 1
            result['video_duration'] = result['duration']
        else:
            result['video_nframes'] = 1
            result['video_duration'] = None
            # We could have also recomputed the duration from the number
            # of frames, as follows:
            # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']

        # get the video rotation info ("rotate : NN" metadata line).
        try:
            rotation_lines = [l for l in lines if 'rotate :' in l and re.search('\d+$', l)]
            if len(rotation_lines):
                rotation_line = rotation_lines[0]
                match = re.search('\d+$', rotation_line)
                result['video_rotation'] = int(rotation_line[match.start():match.end()])
            else:
                result['video_rotation'] = 0
        except:
            raise IOError(("MoviePy error: failed to read video rotation in file %s.\n"
                           "Here are the file infos returned by ffmpeg:\n\n%s") % (
                              filename, infos))

    lines_audio = [l for l in lines if ' Audio: ' in l]

    result['audio_found'] = lines_audio != []

    if result['audio_found']:
        line = lines_audio[0]
        try:
            match = re.search(" [0-9]* Hz", line)
            hz_string = line[match.start() + 1:match.end() - 3]  # Removes the 'hz' from the end
            result['audio_fps'] = int(hz_string)
        except:
            # Best-effort: an unparseable rate is reported, not fatal.
            result['audio_fps'] = 'unknown'

    return result
+ """ + s = "%" + "%02d" % digits + "d.png" + cmd = [get_setting("FFMPEG_BINARY"), "-y", "-f","image2", + "-r", "%d"%fps, + "-i", os.path.join(folder,folder) + '/' + s, + "-b", "%dk"%bitrate, + "-r", "%d"%fps, + filename] + + subprocess_call(cmd) + + +def ffmpeg_extract_subclip(filename, t1, t2, targetname=None): + """ Makes a new video file playing video file ``filename`` between + the times ``t1`` and ``t2``. """ + name, ext = os.path.splitext(filename) + if not targetname: + T1, T2 = [int(1000*t) for t in [t1, t2]] + targetname = "%sSUB%d_%d.%s" % (name, T1, T2, ext) + + cmd = [get_setting("FFMPEG_BINARY"),"-y", + "-ss", "%0.2f"%t1, + "-i", filename, + "-t", "%0.2f"%(t2-t1), + "-map", "0", "-vcodec", "copy", "-acodec", "copy", targetname] + + subprocess_call(cmd) + + +def ffmpeg_merge_video_audio(video,audio,output, vcodec='copy', + acodec='copy', ffmpeg_output=False, + logger = 'bar'): + """ merges video file ``video`` and audio file ``audio`` into one + movie file ``output``. """ + cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", audio,"-i", video, + "-vcodec", vcodec, "-acodec", acodec, output] + + subprocess_call(cmd, logger = logger) + + +def ffmpeg_extract_audio(inputfile,output,bitrate=3000,fps=44100): + """ extract the sound from a video file and save it in ``output`` """ + cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", inputfile, "-ab", "%dk"%bitrate, + "-ar", "%d"%fps, output] + subprocess_call(cmd) + + +def ffmpeg_resize(video,output,size): + """ resizes ``video`` to new size ``size`` and write the result + in file ``output``. 
""" + cmd= [get_setting("FFMPEG_BINARY"), "-i", video, "-vf", "scale=%d:%d"%(size[0], size[1]), + output] + + subprocess_call(cmd) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/ffmpeg_writer.py b/videollama2/lib/python3.10/site-packages/moviepy/video/io/ffmpeg_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..7fdd1dd64b2cf448242d71633e35aa9803a5e167 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/io/ffmpeg_writer.py @@ -0,0 +1,269 @@ +""" +On the long term this will implement several methods to make videos +out of VideoClips +""" + +import os +import subprocess as sp + +import numpy as np +from proglog import proglog + +from moviepy.compat import DEVNULL, PY3 +from moviepy.config import get_setting + + +class FFMPEG_VideoWriter: + """ A class for FFMPEG-based video writing. + + A class to write videos using ffmpeg. ffmpeg will write in a large + choice of formats. + + Parameters + ----------- + + filename + Any filename like 'video.mp4' etc. but if you want to avoid + complications it is recommended to use the generic extension + '.avi' for all your videos. + + size + Size (width,height) of the output video in pixels. + + fps + Frames per second in the output video file. + + codec + FFMPEG codec. It seems that in terms of quality the hierarchy is + 'rawvideo' = 'png' > 'mpeg4' > 'libx264' + 'png' manages the same lossless quality as 'rawvideo' but yields + smaller files. Type ``ffmpeg -codecs`` in a terminal to get a list + of accepted codecs. + + Note for default 'libx264': by default the pixel format yuv420p + is used. If the video dimensions are not both even (e.g. 720x405) + another pixel format is used, and this can cause problem in some + video readers. + + audiofile + Optional: The name of an audio file that will be incorporated + to the video. + + preset + Sets the time that FFMPEG will take to compress the video. The slower, + the better the compression rate. 
Possibilities are: ultrafast,superfast, + veryfast, faster, fast, medium (default), slow, slower, veryslow, + placebo. + + bitrate + Only relevant for codecs which accept a bitrate. "5000k" offers + nice results in general. + + withmask + Boolean. Set to ``True`` if there is a mask in the video to be + encoded. + + """ + + def __init__(self, filename, size, fps, codec="libx264", audiofile=None, + preset="medium", bitrate=None, withmask=False, + logfile=None, threads=None, ffmpeg_params=None): + + if logfile is None: + logfile = sp.PIPE + + self.filename = filename + self.codec = codec + self.ext = self.filename.split(".")[-1] + + # order is important + cmd = [ + get_setting("FFMPEG_BINARY"), + '-y', + '-loglevel', 'error' if logfile == sp.PIPE else 'info', + '-f', 'rawvideo', + '-vcodec', 'rawvideo', + '-s', '%dx%d' % (size[0], size[1]), + '-pix_fmt', 'rgba' if withmask else 'rgb24', + '-r', '%.02f' % fps, + '-an', '-i', '-' + ] + if audiofile is not None: + cmd.extend([ + '-i', audiofile, + '-acodec', 'copy' + ]) + cmd.extend([ + '-vcodec', codec, + '-preset', preset, + ]) + if ffmpeg_params is not None: + cmd.extend(ffmpeg_params) + if bitrate is not None: + cmd.extend([ + '-b', bitrate + ]) + + if threads is not None: + cmd.extend(["-threads", str(threads)]) + + if ((codec == 'libx264') and + (size[0] % 2 == 0) and + (size[1] % 2 == 0)): + cmd.extend([ + '-pix_fmt', 'yuv420p' + ]) + cmd.extend([ + filename + ]) + + popen_params = {"stdout": DEVNULL, + "stderr": logfile, + "stdin": sp.PIPE} + + # This was added so that no extra unwanted window opens on windows + # when the child process is created + if os.name == "nt": + popen_params["creationflags"] = 0x08000000 # CREATE_NO_WINDOW + + self.proc = sp.Popen(cmd, **popen_params) + + + def write_frame(self, img_array): + """ Writes one frame in the file.""" + try: + if PY3: + self.proc.stdin.write(img_array.tobytes()) + else: + self.proc.stdin.write(img_array.tostring()) + except IOError as err: + _, ffmpeg_error = 
self.proc.communicate() + error = (str(err) + ("\n\nMoviePy error: FFMPEG encountered " + "the following error while writing file %s:" + "\n\n %s" % (self.filename, str(ffmpeg_error)))) + + if b"Unknown encoder" in ffmpeg_error: + + error = error+("\n\nThe video export " + "failed because FFMPEG didn't find the specified " + "codec for video encoding (%s). Please install " + "this codec or change the codec when calling " + "write_videofile. For instance:\n" + " >>> clip.write_videofile('myvid.webm', codec='libvpx')")%(self.codec) + + elif b"incorrect codec parameters ?" in ffmpeg_error: + + error = error+("\n\nThe video export " + "failed, possibly because the codec specified for " + "the video (%s) is not compatible with the given " + "extension (%s). Please specify a valid 'codec' " + "argument in write_videofile. This would be 'libx264' " + "or 'mpeg4' for mp4, 'libtheora' for ogv, 'libvpx for webm. " + "Another possible reason is that the audio codec was not " + "compatible with the video codec. For instance the video " + "extensions 'ogv' and 'webm' only allow 'libvorbis' (default) as a" + "video codec." + )%(self.codec, self.ext) + + elif b"encoder setup failed" in ffmpeg_error: + + error = error+("\n\nThe video export " + "failed, possibly because the bitrate you specified " + "was too high or too low for the video codec.") + + elif b"Invalid encoder type" in ffmpeg_error: + + error = error + ("\n\nThe video export failed because the codec " + "or file extension you provided is not a video") + + + raise IOError(error) + + def close(self): + if self.proc: + self.proc.stdin.close() + if self.proc.stderr is not None: + self.proc.stderr.close() + self.proc.wait() + + self.proc = None + + # Support the Context Manager protocol, to ensure that resources are cleaned up. 
+ + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + +def ffmpeg_write_video(clip, filename, fps, codec="libx264", bitrate=None, + preset="medium", withmask=False, write_logfile=False, + audiofile=None, verbose=True, threads=None, ffmpeg_params=None, + logger='bar'): + """ Write the clip to a videofile. See VideoClip.write_videofile for details + on the parameters. + """ + logger = proglog.default_bar_logger(logger) + + if write_logfile: + logfile = open(filename + ".log", 'w+') + else: + logfile = None + logger(message='Moviepy - Writing video %s\n' % filename) + with FFMPEG_VideoWriter(filename, clip.size, fps, codec = codec, + preset=preset, bitrate=bitrate, logfile=logfile, + audiofile=audiofile, threads=threads, + ffmpeg_params=ffmpeg_params) as writer: + + nframes = int(clip.duration*fps) + + for t,frame in clip.iter_frames(logger=logger, with_times=True, + fps=fps, dtype="uint8"): + if withmask: + mask = (255*clip.mask.get_frame(t)) + if mask.dtype != "uint8": + mask = mask.astype("uint8") + frame = np.dstack([frame,mask]) + + writer.write_frame(frame) + + if write_logfile: + logfile.close() + logger(message='Moviepy - Done !') + + +def ffmpeg_write_image(filename, image, logfile=False): + """ Writes an image (HxWx3 or HxWx4 numpy array) to a file, using + ffmpeg. 
""" + + if image.dtype != 'uint8': + image = image.astype("uint8") + + cmd = [ get_setting("FFMPEG_BINARY"), '-y', + '-s', "%dx%d"%(image.shape[:2][::-1]), + "-f", 'rawvideo', + '-pix_fmt', "rgba" if (image.shape[2] == 4) else "rgb24", + '-i','-', filename] + + if logfile: + log_file = open(filename + ".log", 'w+') + else: + log_file = sp.PIPE + + popen_params = {"stdout": DEVNULL, + "stderr": log_file, + "stdin": sp.PIPE} + + if os.name == "nt": + popen_params["creationflags"] = 0x08000000 + + proc = sp.Popen(cmd, **popen_params) + out, err = proc.communicate(image.tostring()) + + if proc.returncode: + err = "\n".join(["[MoviePy] Running : %s\n" % cmd, + "WARNING: this command returned an error:", + err.decode('utf8')]) + raise IOError(err) + + del proc diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/html_tools.py b/videollama2/lib/python3.10/site-packages/moviepy/video/io/html_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..f24af513f391f000da963b0577afc8143f9018ac --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/io/html_tools.py @@ -0,0 +1,221 @@ +""" +This module implements ipython_display +A function to embed images/videos/audio in the IPython Notebook +""" + +# Notes: +# All media are physically embedded in the IPython Notebook +# (instead of simple links to the original files) +# That is because most browsers use a cache system and they won't +# properly refresh the media when the original files are changed. 
def html_embed(clip, filetype=None, maxduration=60, rd_kwargs=None,
               center=True, **html_kwargs):
    """ Returns HTML5 code embedding the clip

    clip
      Either a file name, or a clip to preview.
      Either an image, a sound or a video. Clips will actually be
      written to a file and embedded as if a filename was provided.


    filetype
      One of 'video','image','audio'. If None is given, it is determined
      based on the extension of ``filename``, but this can bug.

    rd_kwargs
      keyword arguments for the rendering, like {'fps':15, 'bitrate':'50k'}


    **html_kwargs
      Allow you to give some options, like width=260, autoplay=True,
      loop=1 etc.

    Examples
    =========

    >>> import moviepy.editor as mpy
    >>> # later ...
    >>> clip.write_videofile("test.mp4")
    >>> mpy.ipython_display("test.mp4", width=360)

    >>> clip.audio.write_audiofile('test.ogg') # Sound !
    >>> mpy.ipython_display('test.ogg')

    >>> clip.write_gif("test.gif")
    >>> mpy.ipython_display('test.gif')

    >>> clip.save_frame("first_frame.jpeg")
    >>> mpy.ipython_display("first_frame.jpeg")

    """

    if rd_kwargs is None:
        rd_kwargs = {}

    # A clip object (not a filename): render it to a temporary file first,
    # then recurse with that filename.
    if "Clip" in str(clip.__class__):
        TEMP_PREFIX = "__temp__"
        if isinstance(clip, ImageClip):
            filename = TEMP_PREFIX + ".png"
            kwargs = {'filename': filename, 'withmask': True}
            kwargs.update(rd_kwargs)
            clip.save_frame(**kwargs)
        elif isinstance(clip, VideoClip):
            filename = TEMP_PREFIX + ".mp4"
            kwargs = {'filename': filename, 'verbose': False, 'preset': 'ultrafast'}
            kwargs.update(rd_kwargs)
            clip.write_videofile(**kwargs)
        elif isinstance(clip, AudioClip):
            filename = TEMP_PREFIX + ".mp3"
            kwargs = {'filename': filename, 'verbose': False}
            kwargs.update(rd_kwargs)
            clip.write_audiofile(**kwargs)
        else:
            raise ValueError("Unknown class for the clip. Cannot embed and preview.")

        return html_embed(filename, maxduration=maxduration, rd_kwargs=rd_kwargs,
                          center=center, **html_kwargs)

    filename = clip
    # Extra attributes for the HTML tag, e.g. width='260' autoplay='True'.
    options = " ".join(["%s='%s'" % (str(k), str(v)) for k, v in html_kwargs.items()])
    name, ext = os.path.splitext(filename)
    ext = ext[1:]

    if filetype is None:
        ext = filename.split('.')[-1].lower()
        if ext == "gif":
            filetype = 'image'
        elif ext in extensions_dict:
            filetype = extensions_dict[ext]['type']
        else:
            raise ValueError("No file type is known for the provided file. Please provide "
                             "argument `filetype` (one of 'image', 'video', 'sound') to the "
                             "ipython display function.")

    if filetype == 'video':
        # The next lines set the HTML5-compatible extension and check that the
        # extension is HTML5-valid
        exts_htmltype = {'mp4': 'mp4', 'webm': 'webm', 'ogv': 'ogg'}
        allowed_exts = " ".join(exts_htmltype.keys())
        try:
            ext = exts_htmltype[ext]
        except:
            raise ValueError("This video extension cannot be displayed in the "
                             "IPython Notebook. Allowed extensions: " + allowed_exts)

    if filetype in ['audio', 'video']:

        duration = ffmpeg_parse_infos(filename)['duration']
        if duration > maxduration:
            raise ValueError("The duration of video %s (%.1f) exceeds the 'maxduration' " % (filename, duration) +
                             "attribute. You can increase 'maxduration', by passing 'maxduration' parameter"
                             "to ipython_display function."
                             "But note that embedding large videos may take all the memory away !")

    with open(filename, "rb") as f:
        data = b64encode(f.read()).decode("utf-8")

    # NOTE(review): the module-level ``templates`` dict literals appear
    # truncated in this vendored copy (their HTML tag text is missing);
    # verify them against upstream moviepy.
    template = templates[filetype]

    result = template % {'data': data, 'options': options, 'ext': ext}
    if center:
        # Restored from upstream moviepy: this HTML wrapper literal was
        # garbled in this copy of the file.
        result = r"<div align=middle>%s</div>" % result

    return result
+ >>> mpy.ipython_display('test.ogg') + + >>> clip.write_gif("test.gif") + >>> mpy.ipython_display('test.gif') + + >>> clip.save_frame("first_frame.jpeg") + >>> mpy.ipython_display("first_frame.jpeg") + """ + + if not ipython_available: + raise ImportError("Only works inside an IPython Notebook") + + if rd_kwargs is None: + rd_kwargs = {} + + if fps is not None: + rd_kwargs['fps'] = fps + + if t is not None: + clip = clip.to_ImageClip(t) + + return HTML2(html_embed(clip, filetype=filetype, maxduration=maxduration, + center=center, rd_kwargs=rd_kwargs, **html_kwargs)) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/io/preview.py b/videollama2/lib/python3.10/site-packages/moviepy/video/io/preview.py new file mode 100644 index 0000000000000000000000000000000000000000..fbab22d41630d0acf927fe2f271e9286b6e0b86c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/io/preview.py @@ -0,0 +1,151 @@ +import threading +import time + +import numpy as np + +import pygame as pg +from moviepy.decorators import convert_masks_to_RGB, requires_duration +from moviepy.tools import cvsecs + +pg.init() +pg.display.set_caption('MoviePy') + + +def imdisplay(imarray, screen=None): + """Splashes the given image array on the given pygame screen """ + a = pg.surfarray.make_surface(imarray.swapaxes(0, 1)) + if screen is None: + screen = pg.display.set_mode(imarray.shape[:2][::-1]) + screen.blit(a, (0, 0)) + pg.display.flip() + + +@convert_masks_to_RGB +def show(clip, t=0, with_mask=True, interactive=False): + """ + Splashes the frame of clip corresponding to time ``t``. + + Parameters + ------------ + + t + Time in seconds of the frame to display. + + with_mask + ``False`` if the clip has a mask but you want to see the clip + without the mask. 
+ + """ + + if isinstance(t, tuple): + t = cvsecs(*t) + + if with_mask and (clip.mask is not None): + import moviepy.video.compositing.CompositeVideoClip as cvc + clip = cvc.CompositeVideoClip([clip.set_position((0, 0))]) + img = clip.get_frame(t) + imdisplay(img) + + if interactive: + result = [] + while True: + for event in pg.event.get(): + if event.type == pg.KEYDOWN: + if event.key == pg.K_ESCAPE: + print("Keyboard interrupt") + return result + elif event.type == pg.MOUSEBUTTONDOWN: + x, y = pg.mouse.get_pos() + rgb = img[y, x] + result.append({'position': (x, y), 'color': rgb}) + print("position, color : ", "%s, %s" % + (str((x, y)), str(rgb))) + time.sleep(.03) + + +@requires_duration +@convert_masks_to_RGB +def preview(clip, fps=15, audio=True, audio_fps=22050, audio_buffersize=3000, + audio_nbytes=2, fullscreen=False): + """ + Displays the clip in a window, at the given frames per second + (of movie) rate. It will avoid that the clip be played faster + than normal, but it cannot avoid the clip to be played slower + than normal if the computations are complex. In this case, try + reducing the ``fps``. + + Parameters + ------------ + + fps + Number of frames per seconds in the displayed video. + + audio + ``True`` (default) if you want the clip's audio be played during + the preview. + + audio_fps + The frames per second to use when generating the audio sound. + + fullscreen + ``True`` if you want the preview to be displayed fullscreen. + + """ + if fullscreen: + flags = pg.FULLSCREEN + else: + flags = 0 + + # compute and splash the first image + screen = pg.display.set_mode(clip.size, flags) + + audio = audio and (clip.audio is not None) + + if audio: + # the sound will be played in parrallel. We are not + # parralellizing it on different CPUs because it seems that + # pygame and openCV already use several cpus it seems. 
import matplotlib.pyplot as plt
from matplotlib.widgets import Button, Slider


def sliders(f, sliders_properties, wait_for_validation=False):
    """ A light GUI to manually explore and tune the outputs of
    a function.

    ``sliders_properties`` is a list of dicts (or tuples) of arguments
    for ``matplotlib.widgets.Slider``::

        def volume(x, y, z):
            return x * y * z

        intervals = [{'label': 'width',  'valmin': 1, 'valmax': 5},
                     {'label': 'height', 'valmin': 1, 'valmax': 5},
                     {'label': 'depth',  'valmin': 1, 'valmax': 5}]
        sliders(volume, intervals)
    """

    nVars = len(sliders_properties)
    slider_width = 1.0 / nVars

    # CREATE THE CANVAS

    figure, ax = plt.subplots(1)
    # fixed: `f.func_name` is Python-2 only; `__name__` works on Python 3.
    window_title = "Inputs for '%s'" % f.__name__
    try:
        # modern matplotlib moved set_window_title onto the manager
        figure.canvas.manager.set_window_title(window_title)
    except AttributeError:
        figure.canvas.set_window_title(window_title)

    # choose an appropriate height

    width, height = figure.get_size_inches()
    height = min(0.5 * nVars, 8)
    figure.set_size_inches(width, height, forward=True)

    # hide the axis
    ax.set_frame_on(False)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # CREATE THE SLIDERS

    sliders = []

    for i, properties in enumerate(sliders_properties):
        ax = plt.axes([0.1, 0.95 - 0.9 * (i + 1) * slider_width,
                       0.8, 0.8 * slider_width])
        if not isinstance(properties, dict):
            # allow plain tuples (label, valmin, valmax[, valinit])
            properties = dict(zip(['label', 'valmin', 'valmax', 'valinit'],
                                  properties))
        sliders.append(Slider(ax=ax, **properties))

    # CREATE THE CALLBACK FUNCTIONS

    def on_changed(event):
        res = f(*(s.val for s in sliders))
        if res is not None:
            print(res)

    def on_key_press(event):
        # fixed: `event.key is 'enter'` compared identity, not equality
        if event.key == 'enter':
            on_changed(event)

    figure.canvas.mpl_connect('key_press_event', on_key_press)

    # AUTOMATIC UPDATE ?

    if not wait_for_validation:
        for s in sliders:
            s.on_changed(on_changed)

    # DISPLAY THE SLIDERS
    plt.show()
0000000000000000000000000000000000000000..8c89cddeb758a9183ff2f52f62baa06e212bda81 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/drawing.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/interpolators.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/interpolators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e5538997907c5820776b1e1a48e17112648d302 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/interpolators.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/segmenting.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/segmenting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75e79a6e930a1faa2f3115cb27671fa0e2e4922d Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/segmenting.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/subtitles.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/subtitles.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa6a79a3dddc17e2f485c92423503e2c3713b6e6 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/subtitles.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/tracking.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/__pycache__/tracking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..066f7c05d8587db9c1ba3f7dc7b3be907059e341 Binary files /dev/null and 
"""
This module contains different functions to make end and opening
credits, even though it is difficult to fill everyone needs in this
matter.
"""

from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.fx.resize import resize
from moviepy.video.VideoClip import ImageClip, TextClip


def credits1(creditfile, width, stretch=30, color='white', stroke_color='black',
             stroke_width=2, font='Impact-Normal', fontsize=60, gap=0):
    """Build a static credits image from a simple text file.

    Parameters
    -----------

    creditfile
      A text file whose content must be as follows: ::

        # This is a comment
        # The next line says : leave 4 blank lines
        .blank 4

        ..Executive Story Editor
        MARCEL DURAND

        ..Associate Producers
        MARTIN MARCEL
        DIDIER MARTIN

        ..Music Supervisor
        JEAN DIDIER

    width
      Total width of the credits text in pixels

    gap
      Horizontal gap in pixels between the jobs and the names

    color
      Color of the text. See ``TextClip.list('color')``
      for a list of acceptable names.

    font
      Name of the font to use. See ``TextClip.list('font')`` for
      the list of fonts you can use on your computer.

    fontsize
      Size of font to use

    stroke_color
      Color of the stroke (=contour line) of the text. If ``None``,
      there will be no stroke.

    stroke_width
      Width of the stroke, in pixels. Can be a float, like 1.5.

    Returns
    ---------

    image
      An ImageClip instance that looks like this and can be scrolled
      to make some credits:

          Executive Story Editor    MARCEL DURAND
             Associate Producers    MARTIN MARCEL
                                    DIDIER MARTIN
                Music Supervisor    JEAN DIDIER
    """

    # PARSE THE TXT FILE into [job, name] pairs, one per rendered row.
    pairs = []
    first_name_line = True  # next name line starts a new row pair

    with open(creditfile) as stream:
        for raw in stream:
            if raw.startswith(('\n', '#')):
                # blank line or comment: ignore
                continue
            if raw.startswith('.blank'):
                # ".blank n" inserts n empty rows in both columns
                count = int(raw.split(' ')[1])
                pairs.extend(['\n', '\n'] for _ in range(count))
            elif raw.startswith('..'):
                # a job header goes in the left column
                pairs.append([raw[2:], ''])
                first_name_line = True
            elif first_name_line:
                pairs.append(['', raw])
                first_name_line = False
            else:
                pairs.append(['\n', raw])

    left, right = ("".join(column) for column in zip(*pairs))

    # RENDER THE TWO COLUMNS as right/left-aligned TextClips
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=side)
                   for txt, side in [(left, 'East'), (right, 'West')]]

    both_columns = CompositeVideoClip(
        [left, right.set_position((left.w + gap, 0))],
        size=(left.w + right.w + gap, right.h),
        bg_color=None)

    # SCALE TO THE REQUIRED SIZE
    scaled = resize(both_columns, width=width)

    # FREEZE THE WHOLE CREDIT CLIP INTO A SINGLE ImageClip (+ its mask)
    imclip = ImageClip(scaled.get_frame(0))
    amask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    return imclip.set_mask(amask)
@use_clip_fps_by_default
def find_video_period(clip, fps=None, tmin=.3):
    """ Finds the period of a quasi-periodic video by maximizing the
    correlation of each frame with the frame at t=0. """

    frame = lambda t: clip.get_frame(t).flatten()
    tt = np.arange(tmin, clip.duration, 1.0 / fps)[1:]
    ref = frame(0)
    corrs = [np.corrcoef(ref, frame(t))[0, 1] for t in tt]
    return tt[np.argmax(corrs)]


class FramesMatch:
    """A pair of matching frame times with bounds on their distance.

    Parameters
    -----------

    t1
      Starting time

    t2
      End time

    d_min
      Lower bound on the distance between the first and last frames

    d_max
      Upper bound on the distance between the first and last frames
    """

    def __init__(self, t1, t2, d_min, d_max):
        self.t1 = t1
        self.t2 = t2
        self.d_min = d_min
        self.d_max = d_max
        self.time_span = t2 - t1

    def __str__(self):
        return '(%.04f, %.04f, %.04f, %.04f)' % (
            self.t1, self.t2, self.d_min, self.d_max)

    def __repr__(self):
        return '(%.04f, %.04f, %.04f, %.04f)' % (
            self.t1, self.t2, self.d_min, self.d_max)

    def __iter__(self):
        return iter((self.t1, self.t2, self.d_min, self.d_max))


class FramesMatches(list):
    """A list of FramesMatch, kept sorted by increasing d_max."""

    def __init__(self, lst):
        list.__init__(self, sorted(lst, key=lambda e: e.d_max))

    def best(self, n=1, percent=None):
        """Return the single best match, or the n (or percent %) best ones."""
        if percent is not None:
            # fixed: len*percent/100 is a float on Python 3 and cannot be
            # used as a slice bound; truncate to int as slicing requires.
            n = int(len(self) * percent / 100)
        return self[0] if n == 1 else FramesMatches(self[:n])

    def filter(self, cond):
        """
        Returns a FramesMatches object obtained by filtering out the FramesMatch
        which do not satistify the condition ``cond``. ``cond`` is a function
        (FrameMatch -> bool).

        Examples
        ---------
        >>> # Only keep the matches corresponding to (> 1 second) sequences.
        >>> new_matches = matches.filter( lambda match: match.time_span > 1)
        """
        return FramesMatches(filter(cond, self))

    def save(self, filename):
        """Save as a tab-separated text file of (t1, t2, d_min, d_max) rows."""
        np.savetxt(filename, np.array([np.array(list(e)) for e in self]),
                   fmt='%.03f', delimiter='\t')

    @staticmethod
    def load(filename):
        """ Loads a FramesMatches object from a file.

        >>> matching_frames = FramesMatches.load("somefile")
        """
        # fixed: ndmin=2 keeps a single-row file 2-D, so the row iteration
        # below yields rows instead of scalars.
        arr = np.loadtxt(filename, ndmin=2)
        mfs = [FramesMatch(*e) for e in arr]
        return FramesMatches(mfs)

    @staticmethod
    def from_clip(clip, dist_thr, max_d, fps=None):
        """ Finds all the frames that look alike in a clip, for instance to make
        a looping gif.

        This returns a FramesMatches object of all the pairs of frames with
        (t2-t1 < max_d) and whose distance is under dist_thr.

        This is a well optimized routine and quite fast.

        Examples
        ---------

        We find all matching frames in a given video and turn the best match
        with a duration of 1.5s or more into a GIF:

        >>> from moviepy.editor import VideoFileClip
        >>> from moviepy.video.tools.cuts import FramesMatches
        >>> clip = VideoFileClip("foo.mp4").resize(width=200)
        >>> matches = FramesMatches.from_clip(clip, 10, 3)  # will take time
        >>> best = matches.filter(lambda m: m.time_span > 1.5).best()
        >>> clip.subclip(best.t1, best.t2).write_gif("foo.gif")

        Parameters
        -----------

        clip
          A MoviePy video clip, possibly transformed/resized

        dist_thr
          Distance above which a match is rejected

        max_d
          Maximal duration (in seconds) between two matching frames

        fps
          Frames per second (default will be clip.fps)
        """

        N_pixels = clip.w * clip.h * 3
        dot_product = lambda F1, F2: (F1 * F2).sum() / N_pixels
        F = {}  # stores the recent frames, their norms and distance bounds

        def distance(t1, t2):
            uv = dot_product(F[t1]['frame'], F[t2]['frame'])
            u, v = F[t1]['|F|sq'], F[t2]['|F|sq']
            return np.sqrt(u + v - 2 * uv)

        matching_frames = []  # the final result.

        for (t, frame) in clip.iter_frames(with_times=True, logger='bar'):

            flat_frame = 1.0 * frame.flatten()
            F_norm_sq = dot_product(flat_frame, flat_frame)
            F_norm = np.sqrt(F_norm_sq)

            for t2 in list(F.keys()):
                # forget old frames, add 't' to the other frames;
                # check for early rejections based on differing norms
                if (t - t2) > max_d:
                    F.pop(t2)
                else:
                    F[t2][t] = {'min': abs(F[t2]['|F|'] - F_norm),
                                'max': F[t2]['|F|'] + F_norm}
                    F[t2][t]['rejected'] = (F[t2][t]['min'] > dist_thr)

            t_F = sorted(F.keys())

            F[t] = {'frame': flat_frame, '|F|sq': F_norm_sq, '|F|': F_norm}

            for i, t2 in enumerate(t_F):
                # Compare F(t) to all the previous frames

                if F[t2][t]['rejected']:
                    continue

                dist = distance(t, t2)
                F[t2][t]['min'] = F[t2][t]['max'] = dist
                F[t2][t]['rejected'] = (dist >= dist_thr)

                for t3 in t_F[i + 1:]:
                    # For all the next times t3, use d(F(t), F(t2)) to
                    # update the bounds on d(F(t), F(t3)). See if you can
                    # conclude on whether F(t) and F(t3) match.
                    t3t, t2t3 = F[t3][t], F[t2][t3]
                    t3t['max'] = min(t3t['max'], dist + t2t3['max'])
                    t3t['min'] = max(t3t['min'], dist - t2t3['max'],
                                     t2t3['min'] - dist)

                    if t3t['min'] > dist_thr:
                        t3t['rejected'] = True

            # Store all the good matches (t2, t)
            matching_frames += [(t1, t, F[t1][t]['min'], F[t1][t]['max'])
                                for t1 in F
                                if (t1 != t) and not F[t1][t]['rejected']]

        return FramesMatches([FramesMatch(*e) for e in matching_frames])

    def select_scenes(self, match_thr, min_time_span, nomatch_thr=None,
                      time_distance=0):
        """Select non-overlapping, well-looping (start, end) matches.

        match_thr
          The smaller, the better-looping the gifs are.

        min_time_span
          Only GIFs with a duration longer than min_time_span (in seconds)
          will be extracted.

        nomatch_thr
          If None, then it is chosen equal to match_thr
        """

        if nomatch_thr is None:
            nomatch_thr = match_thr

        dict_starts = defaultdict(lambda: [])
        for (start, end, d_min, d_max) in self:
            dict_starts[start].append([end, d_min, d_max])

        starts_ends = sorted(dict_starts.items(), key=lambda k: k[0])

        result = []
        min_start = 0
        for start, ends_distances in starts_ends:

            if start < min_start:
                continue

            ends = [end for (end, d_min, d_max) in ends_distances]
            # reconstructed from the garbled source: matches whose distance
            # upper bound is below match_thr ...
            great_matches = [(end, d_min, d_max)
                             for (end, d_min, d_max) in ends_distances
                             if d_max < match_thr]
            # ... and among those, the ones long enough to make a GIF
            great_long_matches = [(end, d_min, d_max)
                                  for (end, d_min, d_max) in great_matches
                                  if (end - start) > min_time_span]

            if not great_long_matches:
                continue  # No GIF can be made starting at this time

            poor_matches = {end for (end, d_min, d_max) in ends_distances
                            if d_min > nomatch_thr}
            short_matches = {end for end in ends if (end - start) <= 0.6}

            if not poor_matches.intersection(short_matches):
                continue

            end = max(end for (end, d_min, d_max) in great_long_matches)
            end, d_min, d_max = next(e for e in great_long_matches
                                     if e[0] == end)

            result.append(FramesMatch(start, end, d_min, d_max))
            min_start = start + time_distance

        return FramesMatches(result)

    def write_gifs(self, clip, gif_dir):
        """Write one GIF per match into ``gif_dir``."""
        for (start, end, _, _) in self:
            name = "%s/%08d_%08d.gif" % (gif_dir, 100 * start, 100 * end)
            clip.subclip(start, end).write_gif(name, verbose=False)


@use_clip_fps_by_default
def detect_scenes(clip=None, luminosities=None, thr=10,
                  logger='bar', fps=None):
    """ Detects scenes of a clip based on luminosity changes.

    Note that for large clip this may take some time

    Returns
    --------
    cuts, luminosities
      cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)]
      luminosities are the luminosities computed for each
      frame of the clip.

    Parameters
    -----------

    clip
      A video clip. Can be None if a list of luminosities is
      provided instead. If provided, the luminosity of each
      frame of the clip will be computed. If the clip has no
      'fps' attribute, you must provide it.

    luminosities
      A list of luminosities, e.g. returned by detect_scenes
      in a previous run.

    thr
      Determines a threshold above which the 'luminosity jumps'
      will be considered as scene changes. A scene change is defined
      as a change between 2 consecutive frames that is larger than
      (avg * thr) where avg is the average of the absolute changes
      between consecutive frames.

    logger
      Either 'bar' for a progress bar, or None.

    fps
      Must be provided if you provide no clip or a clip without
      fps attribute.
    """
    if luminosities is None:
        luminosities = [f.sum() for f in clip.iter_frames(
            fps=fps, dtype='uint32', logger=logger)]

    luminosities = np.array(luminosities, dtype=float)
    if clip is not None:
        end = clip.duration
    else:
        end = len(luminosities) * (1.0 / fps)
    lum_diffs = abs(np.diff(luminosities))
    avg = lum_diffs.mean()
    luminosity_jumps = 1 + np.array(np.nonzero(lum_diffs > thr * avg))[0]
    tt = [0] + list((1.0 / fps) * luminosity_jumps) + [end]
    cuts = [(t1, t2) for t1, t2 in zip(tt, tt[1:])]
    return cuts, luminosities
+ """ + if pos is None: + pos = [0, 0] + + # xp1,yp1,xp2,yp2 = blit area on im2 + # x1,y1,x2,y2 = area of im1 to blit on im2 + xp, yp = pos + x1 = max(0, -xp) + y1 = max(0, -yp) + h1, w1 = im1.shape[:2] + h2, w2 = im2.shape[:2] + xp2 = min(w2, xp + w1) + yp2 = min(h2, yp + h1) + x2 = min(w1, w2 - xp) + y2 = min(h1, h2 - yp) + xp1 = max(0, xp) + yp1 = max(0, yp) + + if (xp1 >= xp2) or (yp1 >= yp2): + return im2 + + blitted = im1[y1:y2, x1:x2] + + new_im2 = +im2 + + if mask is None: + new_im2[yp1:yp2, xp1:xp2] = blitted + else: + mask = mask[y1:y2, x1:x2] + if len(im1.shape) == 3: + mask = np.dstack(3 * [mask]) + blit_region = new_im2[yp1:yp2, xp1:xp2] + new_im2[yp1:yp2, xp1:xp2] = (1.0 * mask * blitted + (1.0 - mask) * blit_region) + + return new_im2.astype('uint8') if (not ismask) else new_im2 + + + +def color_gradient(size,p1,p2=None,vector=None, r=None, col1=0,col2=1.0, + shape='linear', offset = 0): + """Draw a linear, bilinear, or radial gradient. + + The result is a picture of size ``size``, whose color varies + gradually from color `col1` in position ``p1`` to color ``col2`` + in position ``p2``. + + If it is a RGB picture the result must be transformed into + a 'uint8' array to be displayed normally: + + + Parameters + ------------ + + size + Size (width, height) in pixels of the final picture/array. + + p1, p2 + Coordinates (x,y) in pixels of the limit point for ``col1`` + and ``col2``. The color 'before' ``p1`` is ``col1`` and it + gradually changes in the direction of ``p2`` until it is ``col2`` + when it reaches ``p2``. + + vector + A vector [x,y] in pixels that can be provided instead of ``p2``. + ``p2`` is then defined as (p1 + vector). + + col1, col2 + Either floats between 0 and 1 (for gradients used in masks) + or [R,G,B] arrays (for colored gradients). + + shape + 'linear', 'bilinear', or 'circular'. + In a linear gradient the color varies in one direction, + from point ``p1`` to point ``p2``. 
+ In a bilinear gradient it also varies symetrically form ``p1`` + in the other direction. + In a circular gradient it goes from ``col1`` to ``col2`` in all + directions. + + offset + Real number between 0 and 1 indicating the fraction of the vector + at which the gradient actually starts. For instance if ``offset`` + is 0.9 in a gradient going from p1 to p2, then the gradient will + only occur near p2 (before that everything is of color ``col1``) + If the offset is 0.9 in a radial gradient, the gradient will + occur in the region located between 90% and 100% of the radius, + this creates a blurry disc of radius d(p1,p2). + + Returns + -------- + + image + An Numpy array of dimensions (W,H,ncolors) of type float + representing the image of the gradient. + + + Examples + --------- + + >>> grad = color_gradient(blabla).astype('uint8') + + """ + + # np-arrayize and change x,y coordinates to y,x + w,h = size + + col1 = np.array(col1).astype(float) + col2 = np.array(col2).astype(float) + + if shape == 'bilinear': + if vector is None: + vector = np.array(p2) - np.array(p1) + + m1, m2 = [ color_gradient(size, p1, vector=v, col1 = 1.0, col2 = 0, + shape = 'linear', offset= offset) + for v in [vector,-vector]] + + arr = np.maximum(m1, m2) + if col1.size > 1: + arr = np.dstack(3*[arr]) + return arr*col1 + (1-arr)*col2 + + + p1 = np.array(p1[::-1]).astype(float) + + if vector is None and p2: + p2 = np.array(p2[::-1]) + vector = p2-p1 + else: + vector = np.array(vector[::-1]) + p2 = p1 + vector + + if vector: + norm = np.linalg.norm(vector) + + M = np.dstack(np.meshgrid(range(w),range(h))[::-1]).astype(float) + + if shape == 'linear': + + n_vec = vector/norm**2 # norm 1/norm(vector) + + p1 = p1 + offset*vector + arr = (M- p1).dot(n_vec)/(1-offset) + arr = np.minimum(1,np.maximum(0,arr)) + if col1.size > 1: + arr = np.dstack(3*[arr]) + return arr*col1 + (1-arr)*col2 + + elif shape == 'radial': + if r is None: + r = norm + + if r == 0: + arr = np.ones((h,w)) + else: + arr = 
(np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * r + arr = arr / ((1-offset)*r) + arr = np.minimum(1.0, np.maximum(0, arr)) + + if col1.size > 1: + arr = np.dstack(3*[arr]) + return (1-arr)*col1 + arr*col2 + + +def color_split(size,x=None,y=None,p1=None,p2=None,vector=None, + col1=0,col2=1.0, grad_width=0): + """Make an image splitted in 2 colored regions. + + Returns an array of size ``size`` divided in two regions called 1 and + 2 in wht follows, and which will have colors col& and col2 + respectively. + + Parameters + ----------- + + x: (int) + If provided, the image is splitted horizontally in x, the left + region being region 1. + + y: (int) + If provided, the image is splitted vertically in y, the top region + being region 1. + + p1,p2: + Positions (x1,y1),(x2,y2) in pixels, where the numbers can be + floats. Region 1 is defined as the whole region on the left when + going from ``p1`` to ``p2``. + + p1, vector: + ``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be + floats. Region 1 is then the region on the left when starting + in position ``p1`` and going in the direction given by ``vector``. + + gradient_width + If not zero, the split is not sharp, but gradual over a region of + width ``gradient_width`` (in pixels). This is preferable in many + situations (for instance for antialiasing). 
+ + + Examples + --------- + + >>> size = [200,200] + >>> # an image with all pixels with x<50 =0, the others =1 + >>> color_split(size, x=50, col1=0, col2=1) + >>> # an image with all pixels with y<50 red, the others green + >>> color_split(size, x=50, col1=[255,0,0], col2=[0,255,0]) + >>> # An image splitted along an arbitrary line (see below) + >>> color_split(size, p1=[20,50], p2=[25,70] col1=0, col2=1) + + """ + + if grad_width or ( (x is None) and (y is None)): + if p2 is not None: + vector = (np.array(p2) - np.array(p1)) + elif x is not None: + vector = np.array([0,-1.0]) + p1 = np.array([x, 0]) + elif y is not None: + vector = np.array([1.0, 0.0]) + p1 = np.array([0,y]) + + x,y = vector + vector = np.array([y,-x]).astype('float') + norm = np.linalg.norm(vector) + vector = max(0.1, grad_width) * vector / norm + return color_gradient(size,p1,vector=vector, + col1 = col1, col2 = col2, shape='linear') + else: + w, h = size + shape = (h, w) if np.isscalar(col1) else (h, w, len(col1)) + arr = np.zeros(shape) + if x: + arr[:,:x] = col1 + arr[:,x:] = col2 + elif y: + arr[:y] = col1 + arr[y:] = col2 + return arr + + # if we are here, it means we didn't exit with a proper 'return' + print( "Arguments in color_split not understood !" ) + raise + +def circle(screensize, center, radius, col1=1.0, col2=0, blur=1): + """ Draw an image with a circle. 
+ + Draws a circle of color ``col1``, on a background of color ``col2``, + on a screen of size ``screensize`` at the position ``center=(x,y)``, + with a radius ``radius`` but slightly blurred on the border by ``blur`` + pixels + """ + offset = 1.0*(radius-blur)/radius if radius else 0 + return color_gradient(screensize,p1=center,r=radius, col1=col1, + col2=col2, shape='radial', offset=offset) diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/interpolators.py b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/interpolators.py new file mode 100644 index 0000000000000000000000000000000000000000..9d1f4955aa605a8d4d35e35455d0b16d0b5c3323 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/interpolators.py @@ -0,0 +1,73 @@ +""" +Classes for easy interpolation of trajectories and Curves. +Requires Scipy installed. +""" + +import numpy as np + + +class Interpolator: + """ Poorman's linear interpolator, doesn't require Scipy. """ + + def __init__(self, tt=None, ss=None, ttss = None, left=None, right=None): + + if ttss is not None: + tt, ss = zip(*ttss) + + self.tt = 1.0*np.array(tt) + self.ss = 1.0*np.array(ss) + self.left = left + self.right = right + self.tmin, self.tmax = min(tt), max(tt) + + def __call__(self, t): + return np.interp(t, self.tt, self.ss, self.left, self.right) + +class Trajectory: + + def __init__(self, tt, xx, yy): + + self.tt = 1.0*np.array(tt) + self.xx = np.array(xx) + self.yy = np.array(yy) + self.update_interpolators() + + def __call__(self, t): + return np.array([self.xi(t), self.yi(t)]) + + def addx(self, x): + return Trajectory(self.tt, self.xx+x, self.yy) + + def addy(self, y): + return Trajectory(self.tt, self.xx, self.yy+y) + + def update_interpolators(self): + self.xi = Interpolator(self.tt, self.xx) + self.yi = Interpolator(self.tt, self.yy) + + def txy(self, tms=False): + return zip((1000 if tms else 1)*self.tt, self.xx, self.yy) + + def to_file(self, filename): + 
np.savetxt(filename, np.array(self.txy(tms=True)), + fmt="%d", delimiter='\t') + + @staticmethod + def from_file(filename): + arr = np.loadtxt(filename, delimiter='\t') + tt, xx, yy = arr.T + return Trajectory(1.0*tt/1000, xx, yy) + + @staticmethod + def save_list(trajs, filename): + N = len(trajs) + arr = np.hstack([np.array(list(t.txy(tms=True))) for t in trajs]) + np.savetxt( filename, arr, fmt="%d", delimiter='\t', + header = "\t".join(N*['t(ms)', 'x', 'y'])) + + @staticmethod + def load_list(filename): + arr = np.loadtxt(filename, delimiter='\t').T + Nlines = arr.shape[0] + return [Trajectory(tt=1.0*a[0]/1000, xx=a[1], yy=a[2]) + for a in np.split(arr, Nlines/3)] diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/segmenting.py b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/segmenting.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6c06bf5c7d143aab4ed8ce5477b86adfad4908 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/segmenting.py @@ -0,0 +1,59 @@ +import numpy as np + +import scipy.ndimage as ndi +from moviepy.video.VideoClip import ImageClip + + +def findObjects(clip,rem_thr=500, preview=False): + """ + Returns a list of ImageClips representing each a separate object on + the screen. + + rem_thr : all objects found with size < rem_Thr will be + considered false positives and will be removed + + """ + + image = clip.get_frame(0) + if not clip.mask: + clip = clip.add_mask() + + mask = clip.mask.get_frame(0) + labelled, num_features = ndi.measurements.label(image[:,:,0]) + + #find the objects + slices = [] + for e in ndi.find_objects(labelled): + if mask[e[0],e[1]].mean() <= 0.2: + # remove letter holes (in o,e,a, etc.) 
+ continue + if image[e[0],e[1]].size <= rem_thr: + # remove very small slices + continue + slices.append(e) + islices = sorted(enumerate(slices), key = lambda s : s[1][1].start) + + letters = [] + for i,(ind,(sy,sx)) in enumerate(islices): + """ crop each letter separately """ + sy = slice(sy.start-1,sy.stop+1) + sx = slice(sx.start-1,sx.stop+1) + letter = image[sy,sx] + labletter = labelled[sy,sx] + maskletter = (labletter==(ind+1))*mask[sy,sx] + letter = ImageClip(image[sy,sx]) + letter.mask = ImageClip( maskletter,ismask=True) + letter.screenpos = np.array((sx.start,sy.start)) + letters.append(letter) + + if preview: + import matplotlib.pyplot as plt + print( "found %d objects"%(num_features) ) + fig,ax = plt.subplots(2) + ax[0].axis('off') + ax[0].imshow(labelled) + ax[1].imshow([range(num_features)],interpolation='nearest') + ax[1].set_yticks([]) + plt.show() + + return letters diff --git a/videollama2/lib/python3.10/site-packages/moviepy/video/tools/subtitles.py b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/subtitles.py new file mode 100644 index 0000000000000000000000000000000000000000..34a8615c2f7a60eee56142f40583d83a3181aafc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/moviepy/video/tools/subtitles.py @@ -0,0 +1,163 @@ +""" Experimental module for subtitles support. """ + +import re + +import numpy as np + +from moviepy.tools import cvsecs +from moviepy.video.VideoClip import TextClip, VideoClip + + +class SubtitlesClip(VideoClip): + """ A Clip that serves as "subtitle track" in videos. + + One particularity of this class is that the images of the + subtitle texts are not generated beforehand, but only if + needed. 
+ + Parameters + ========== + + subtitles + Either the name of a file, or a list + + Examples + ========= + + >>> from moviepy.video.tools.subtitles import SubtitlesClip + >>> from moviepy.video.io.VideoFileClip import VideoFileClip + >>> generator = lambda txt: TextClip(txt, font='Georgia-Regular', fontsize=24, color='white') + >>> sub = SubtitlesClip("subtitles.srt", generator) + >>> myvideo = VideoFileClip("myvideo.avi") + >>> final = CompositeVideoClip([clip, subtitles]) + >>> final.write_videofile("final.mp4", fps=myvideo.fps) + + """ + + def __init__(self, subtitles, make_textclip=None): + + VideoClip.__init__(self, has_constant_size=False) + + if isinstance(subtitles, str): + subtitles = file_to_subtitles(subtitles) + + #subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles] + self.subtitles = subtitles + self.textclips = dict() + + if make_textclip is None: + make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold', + fontsize=24, color='white', + stroke_color='black', stroke_width=0.5) + + self.make_textclip = make_textclip + self.start=0 + self.duration = max([tb for ((ta,tb), txt) in self.subtitles]) + self.end=self.duration + + def add_textclip_if_none(t): + """ Will generate a textclip if it hasn't been generated asked + to generate it yet. If there is no subtitle to show at t, return + false. """ + sub =[((ta,tb),txt) for ((ta,tb),txt) in self.textclips.keys() + if (ta<=t>> from moviepy.editor import VideoFileClip + >>> from moviepy.video.tools.tracking import manual_tracking + >>> clip = VideoFileClip("myvideo.mp4") + >>> # manually indicate 3 trajectories, save them to a file + >>> trajectories = manual_tracking(clip, t1=5, t2=7, fps=5, + nobjects=3, savefile="track.txt") + >>> # ... 
+ >>> # LATER, IN ANOTHER SCRIPT, RECOVER THESE TRAJECTORIES + >>> from moviepy.video.tools.tracking import Trajectory + >>> traj1, traj2, traj3 = Trajectory.load_list('track.txt') + >>> # If ever you only have one object being tracked, recover it with + >>> traj, = Trajectory.load_list('track.txt') + + """ + + import pygame as pg + + screen = pg.display.set_mode(clip.size) + step = 1.0 / fps + if (t1 is None) and (t2 is None): + t1,t2 = 0, clip.duration + elif (t2 is None): + t2 = t1 + step / 2 + t = t1 + txy_list = [] + + def gatherClicks(t): + + imdisplay(clip.get_frame(t), screen) + objects_to_click = nobjects + clicks = [] + while objects_to_click: + + for event in pg.event.get(): + + if event.type == pg.KEYDOWN: + if (event.key == pg.K_BACKSLASH): + return "return" + elif (event.key == pg.K_ESCAPE): + raise KeyboardInterrupt() + + + elif event.type == pg.MOUSEBUTTONDOWN: + x, y = pg.mouse.get_pos() + clicks.append((x, y)) + objects_to_click -= 1 + + return clicks + + while t < t2: + + clicks =gatherClicks(t) + if clicks == 'return': + txy_list.pop() + t -= step + else: + txy_list.append((t,clicks)) + t += step + + tt, xylist = zip(*txy_list) + result = [] + for i in range(nobjects): + xys = [e[i] for e in xylist] + xx, yy = zip(*xys) + result.append(Trajectory(tt, xx, yy)) + + if savefile is not None: + Trajectory.save_list(result, savefile) + return result + + +# AUTOMATED TRACKING OF A PATTERN + +def findAround(pic,pat,xy=None,r=None): + """ + find image pattern ``pat`` in ``pic[x +/- r, y +/- r]``. + if xy is none, consider the whole picture. + """ + + if xy and r: + h,w = pat.shape[:2] + x,y = xy + pic = pic[y-r : y+h+r , x-r : x+w+r] + + matches = cv2.matchTemplate(pat,pic,cv2.TM_CCOEFF_NORMED) + yf,xf = np.unravel_index(matches.argmax(),matches.shape) + return (x-r+xf,y-r+yf) if (xy and r) else (xf,yf) + + +def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None): + """ + Tracks a given pattern (small image array) in a video clip. 
+ Returns [(x1,y1),(x2,y2)...] where xi,yi are + the coordinates of the pattern in the clip on frame i. + To select the frames you can either specify a list of times with ``tt`` + or select a frame rate with ``fps``. + This algorithm assumes that the pattern's aspect does not vary much + and that the distance between two occurences of the pattern in + two consecutive frames is smaller than ``radius`` (if you set ``radius`` + to -1 the pattern will be searched in the whole screen at each frame). + You can also provide the original position of the pattern with xy0. + """ + + if not autotracking_possible: + raise IOError("Sorry, autotrack requires OpenCV for the moment. " + "Install OpenCV (aka cv2) to use it.") + + + if not xy0: + xy0 = findAround(clip.get_frame(tt[0]),pattern) + + if tt is None: + tt = np.arange(0, clip.duration, 1.0/fps) + + xys = [xy0] + for t in tt[1:]: + xys.append( findAround(clip.get_frame(t),pattern, + xy=xys[-1],r=radius)) + + xx,yy = zip(*xys) + + return Trajectory(tt, xx, yy) diff --git a/videollama2/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a2ca408b7aab95f00321baff52a278571e1787e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4942eee0cf21d6a4dac0c768c07192b4ac81e674b0477018f43a7640d5c521ca +size 194433 diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7fd3964cc521f5572df6389d79c5a70eed418a6 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/remote_module.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/remote_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52c85688cf0b343fccd4692bef72a8f683cf4308 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/remote_module.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8067e5a65d0e9ccb6e306982de17f89ff96d12d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/instantiator.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/instantiator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5af782c4496bc78317207dfe3b97fa3bef9373ec Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/instantiator.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/remote_module_template.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/remote_module_template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ab3a8204e71ff4bff156490520ebb6f734e4fab Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/remote_module_template.cpython-310.pyc differ