id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
156,265 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_fonts_dir` function. Write a Python function `def get_fonts_dir() -> str` to solve the following problem:
Gets the fonts directory. Returns: dir (str): The fonts directory
Here is the function:
def get_fonts_dir() -> str:
    """
    Gets the fonts directory.
    Returns:
        dir (str): The fonts directory (<ROOT_DIR>/fonts)
    """
    return os.path.join(ROOT_DIR, "fonts") | Gets the fonts directory. Returns: dir (str): The fonts directory |
156,266 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_imagemagick_path` function. Write a Python function `def get_imagemagick_path() -> str` to solve the following problem:
Gets the path to ImageMagick. Returns: path (str): The path to ImageMagick
Here is the function:
def get_imagemagick_path() -> str:
    """
    Gets the path to ImageMagick.
    Returns:
        path (str): The path to ImageMagick, read from the
        "imagemagick_path" key of config.json under the project root.
    """
    with open(os.path.join(ROOT_DIR, "config.json"), "r") as file:
        return json.load(file)["imagemagick_path"] | Gets the path to ImageMagick. Returns: path (str): The path to ImageMagick |
156,267 | import os
import json
from typing import List
from config import ROOT_DIR
def get_twitter_cache_path() -> str:
    """Return the full path of the Twitter account cache file.

    Returns:
        str: ``<cache folder>/twitter.json``
    """
    cache_dir = get_cache_path()
    return os.path.join(cache_dir, 'twitter.json')
def get_youtube_cache_path() -> str:
    """Return the full path of the YouTube account cache file.

    Returns:
        str: ``<cache folder>/youtube.json``
    """
    cache_dir = get_cache_path()
    return os.path.join(cache_dir, 'youtube.json')
def get_accounts(provider: str) -> List[dict]:
    """
    Gets the accounts from the cache.

    Args:
        provider (str): The provider to get the accounts for
            ("twitter" or "youtube")

    Returns:
        account (List[dict]): The accounts; empty list if the cache
            file is empty or malformed.

    Raises:
        ValueError: If the provider is not recognised.
    """
    if provider == "twitter":
        cache_path = get_twitter_cache_path()
    elif provider == "youtube":
        cache_path = get_youtube_cache_path()
    else:
        # Previously an unknown provider fell through with cache_path == ""
        # and crashed later with a cryptic FileNotFoundError on open("").
        raise ValueError(f"Unknown provider: {provider!r}")

    if not os.path.exists(cache_path):
        # Create the cache file with an empty account list
        with open(cache_path, 'w') as file:
            json.dump({"accounts": []}, file, indent=4)

    with open(cache_path, 'r') as file:
        parsed = json.load(file)

    # Guard against an empty or schema-less cache file
    if parsed is None or 'accounts' not in parsed:
        return []
    return parsed['accounts']
The provided code snippet includes necessary dependencies for implementing the `add_account` function. Write a Python function `def add_account(provider: str, account: dict) -> None` to solve the following problem:
Adds an account to the cache. Args: account (dict): The account to add Returns: None
Here is the function:
def add_account(provider: str, account: dict) -> None:
    """
    Adds an account to the cache.
    Args:
        provider (str): Which cache to write to ("twitter" or "youtube")
        account (dict): The account to add
    Returns:
        None
    """
    # NOTE(review): an unrecognized provider is silently ignored here.
    if provider == "twitter":
        # Get the current accounts
        accounts = get_accounts("twitter")
        # Add the new account
        accounts.append(account)
        # Write the new accounts to the cache
        with open(get_twitter_cache_path(), 'w') as file:
            json.dump({
                "accounts": accounts
            }, file, indent=4)
    elif provider == "youtube":
        # Get the current accounts
        accounts = get_accounts("youtube")
        # Add the new account
        accounts.append(account)
        # Write the new accounts to the cache
        with open(get_youtube_cache_path(), 'w') as file:
            json.dump({
                "accounts": accounts
            }, file, indent=4) | Adds an account to the cache. Args: account (dict): The account to add Returns: None |
156,268 | import os
import json
from typing import List
from config import ROOT_DIR
def get_twitter_cache_path() -> str:
    """Build and return the location of the Twitter cache inside the
    application cache directory.

    Returns:
        str: path ending in ``twitter.json``
    """
    filename = 'twitter.json'
    return os.path.join(get_cache_path(), filename)
def get_accounts(provider: str) -> List[dict]:
    """
    Gets the accounts from the cache.

    Args:
        provider (str): The provider to get the accounts for
            ("twitter" or "youtube")

    Returns:
        account (List[dict]): The accounts; empty list if the cache
            file is empty or malformed.

    Raises:
        ValueError: If the provider is not recognised.
    """
    if provider == "twitter":
        cache_path = get_twitter_cache_path()
    elif provider == "youtube":
        cache_path = get_youtube_cache_path()
    else:
        # Previously an unknown provider fell through with cache_path == ""
        # and crashed later with a cryptic FileNotFoundError on open("").
        raise ValueError(f"Unknown provider: {provider!r}")

    if not os.path.exists(cache_path):
        # Create the cache file with an empty account list
        with open(cache_path, 'w') as file:
            json.dump({"accounts": []}, file, indent=4)

    with open(cache_path, 'r') as file:
        parsed = json.load(file)

    # Guard against an empty or schema-less cache file
    if parsed is None or 'accounts' not in parsed:
        return []
    return parsed['accounts']
The provided code snippet includes necessary dependencies for implementing the `remove_account` function. Write a Python function `def remove_account(account_id: str) -> None` to solve the following problem:
Removes an account from the cache. Args: account_id (str): The ID of the account to remove Returns: None
Here is the function:
def remove_account(account_id: str) -> None:
    """
    Removes an account from the cache.
    Args:
        account_id (str): The ID of the account to remove
    Returns:
        None
    """
    # Get the current accounts
    # BUG(review): get_accounts() takes a required `provider` argument
    # ("twitter"/"youtube"), so this call raises TypeError as written.
    # Presumably it should be get_accounts("twitter"), matching the
    # Twitter cache rewritten below — TODO confirm with callers.
    accounts = get_accounts()
    # Remove the account
    accounts = [account for account in accounts if account['id'] != account_id]
    # Write the new accounts to the cache
    # NOTE(review): only the Twitter cache is rewritten here.
    with open(get_twitter_cache_path(), 'w') as file:
        json.dump({
            "accounts": accounts
        }, file, indent=4) | Removes an account from the cache. Args: account_id (str): The ID of the account to remove Returns: None |
156,269 | import os
import json
from typing import List
from config import ROOT_DIR
def get_afm_cache_path() -> str:
    """Return the location of the Affiliate Marketing (AFM) cache file.

    Returns:
        str: ``<cache folder>/afm.json``
    """
    cache_dir = get_cache_path()
    return os.path.join(cache_dir, 'afm.json')
def get_products() -> List[dict]:
    """
    Gets the products from the cache.

    Returns:
        products (List[dict]): The products; empty list if the cache
            file is empty or malformed.
    """
    afm_cache = get_afm_cache_path()
    if not os.path.exists(afm_cache):
        # Create the cache file with an empty product list
        with open(afm_cache, 'w') as file:
            json.dump({"products": []}, file, indent=4)

    with open(afm_cache, 'r') as file:
        parsed = json.load(file)

    # Guard against an empty/corrupt cache instead of raising KeyError,
    # mirroring the defensive handling used for accounts.
    if parsed is None or "products" not in parsed:
        return []
    return parsed["products"]
The provided code snippet includes necessary dependencies for implementing the `add_product` function. Write a Python function `def add_product(product: dict) -> None` to solve the following problem:
Adds a product to the cache. Args: product (dict): The product to add Returns: None
Here is the function:
def add_product(product: dict) -> None:
    """
    Adds a product to the cache.
    Args:
        product (dict): The product to add
    Returns:
        None
    """
    # Get the current products
    products = get_products()
    # Add the new product
    products.append(product)
    # Write the new products to the cache (rewrites the whole AFM cache file)
    with open(get_afm_cache_path(), 'w') as file:
        json.dump({
            "products": products
        }, file, indent=4) | Adds a product to the cache. Args: product (dict): The product to add Returns: None |
156,270 | import os
import json
from typing import List
from config import ROOT_DIR
def get_cache_path() -> str:
    """Return the application cache directory: ``<ROOT_DIR>/.mp``.

    Returns:
        str: The cache folder path (not created here).
    """
    cache_dir = os.path.join(ROOT_DIR, '.mp')
    return cache_dir
The provided code snippet includes necessary dependencies for implementing the `get_results_cache_path` function. Write a Python function `def get_results_cache_path() -> str` to solve the following problem:
Gets the path to the results cache file. Returns: path (str): The path to the results cache folder
Here is the function:
def get_results_cache_path() -> str:
    """
    Gets the path to the results cache file.
    Returns:
        path (str): The scraper results CSV inside the cache folder
    """
    return os.path.join(get_cache_path(), 'scraper_results.csv') | Gets the path to the results cache file. Returns: path (str): The path to the results cache folder |
156,271 | import g4f
def parse_model(model_name: str) -> any:
    """
    Map a model name string to the corresponding g4f model object.

    Args:
        model_name (str): One of "gpt4", "gpt35_turbo", "llama2_7b",
            "llama2_13b", "llama2_70b" or "mixtral_8x7b".

    Returns:
        The matching g4f.models entry; any unknown name falls back to
        g4f.models.gpt_35_turbo.
    """
    if model_name == "gpt4":
        return g4f.models.gpt_4
    elif model_name == "gpt35_turbo":
        return g4f.models.gpt_35_turbo
    elif model_name == "llama2_7b":
        return g4f.models.llama2_7b
    elif model_name == "llama2_13b":
        return g4f.models.llama2_13b
    elif model_name == "llama2_70b":
        return g4f.models.llama2_70b
    elif model_name == "mixtral_8x7b":
        return g4f.models.mixtral_8x7b
    else:
        # Default model is gpt3.5-turbo
        return g4f.models.gpt_35_turbo | null |
156,272 | from config import ROOT_DIR
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `print_banner` function. Write a Python function `def print_banner() -> None` to solve the following problem:
Prints the introductory ASCII Art Banner. Returns: None
Here is the function:
def print_banner() -> None:
    """
    Prints the introductory ASCII Art Banner.
    Returns:
        None
    """
    # Banner text lives in assets/banner.txt under the project root.
    with open(f"{ROOT_DIR}/assets/banner.txt", "r") as file:
        print(colored(file.read(), "green")) | Prints the introductory ASCII Art Banner. Returns: None |
156,273 | import tkinter as tk
import socket
import json
import time
from datetime import timedelta
from select import select
from tkinter import ttk
from view_model import *
from PIL import ImageTk, Image
from sys import platform
import os
SCALE = 1
def flattenAlpha(img):
    """
    Resize an RGBA image by the global SCALE factor and binarize its alpha
    channel: every pixel becomes fully transparent (0) or fully opaque (255).

    Args:
        img: A PIL image with an alpha channel.

    Returns:
        The resized image with the hard-edged alpha mask applied.
    """
    global SCALE
    [img_w, img_h] = img.size
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
    # requires Image.LANCZOS / Image.Resampling.LANCZOS here.
    img = img.resize((int(img_w * SCALE), int(img_h * SCALE)), Image.ANTIALIAS)
    alpha = img.split()[-1] # Pull off the alpha layer
    ab = alpha.tobytes() # Original 8-bit alpha
    checked = [] # Create a new array to store the cleaned up alpha layer bytes
    # Walk through all pixels and set them either to 0 for transparent or 255 for opaque fancy pants
    transparent = 50 # change to suit your tolerance for what is and is not transparent
    p = 0  # NOTE(review): this counter is incremented but never read
    for pixel in range(0, len(ab)):
        if ab[pixel] < transparent:
            checked.append(0) # Transparent
        else:
            checked.append(255) # Opaque
            p += 1
    mask = Image.frombytes('L', img.size, bytes(checked))
    img.putalpha(mask)
    return img | null |
156,274 | import tkinter as tk
import socket
import json
import time
from datetime import timedelta
from select import select
from tkinter import ttk
from view_model import *
from PIL import ImageTk, Image
from sys import platform
import os
UP_KEY_CODE = 8255233 if platform == "darwin" else 111
DOWN_KEY_CODE = 8320768 if platform == "darwin" else 116
LEFT_KEY_CODE = 8124162 if platform == "darwin" else 113
RIGHT_KEY_CODE = 8189699 if platform == "darwin" else 114
PREV_KEY_CODE = 2818092 if platform == "darwin" else 0
NEXT_KEY_CODE = 3080238 if platform == "darwin" else 0
PLAY_KEY_CODE = 3211296 if platform == "darwin" else 0
def onPlayPressed():
    """Forward the play/pause key to the current page and repaint it."""
    global page, app
    page.nav_play()
    updated = page.render()
    render(app, updated)
def onSelectPressed():
    """Drill into the highlighted item, if the current page has sub-pages."""
    global page, app
    if page.has_sub_page:
        # Drop the old page's render subscriptions before swapping it out.
        page.render().unsubscribe()
        page = page.nav_select()
        render(app, page.render())
def onBackPressed():
    """Return to the parent page when one exists."""
    global page, app
    parent = page.nav_back()
    if not parent:
        return
    # Drop the old page's render subscriptions before swapping it out.
    page.render().unsubscribe()
    page = parent
    render(app, page.render())
def onNextPressed():
    """Forward the next-track key to the current page and repaint it."""
    global page, app
    page.nav_next()
    updated = page.render()
    render(app, updated)
def onPrevPressed():
    """Forward the previous-track key to the current page and repaint it."""
    global page, app
    page.nav_prev()
    updated = page.render()
    render(app, updated)
def onUpPressed():
    """Move the selection up on the current page and repaint it."""
    global page, app
    page.nav_up()
    updated = page.render()
    render(app, updated)
def onDownPressed():
    """Move the selection down on the current page and repaint it."""
    global page, app
    page.nav_down()
    updated = page.render()
    render(app, updated)
def onKeyPress(event):
    """Route a Tk key event to the matching navigation handler by keycode."""
    c = event.keycode
    if (c == UP_KEY_CODE):
        onUpPressed()
    elif (c == DOWN_KEY_CODE):
        onDownPressed()
    elif (c == RIGHT_KEY_CODE):
        # Right arrow doubles as "select"
        onSelectPressed()
    elif (c == LEFT_KEY_CODE):
        # Left arrow doubles as "back"
        onBackPressed()
    elif (c == NEXT_KEY_CODE):
        onNextPressed()
    elif (c == PREV_KEY_CODE):
        onPrevPressed()
    elif (c == PLAY_KEY_CODE):
        onPlayPressed()
    else:
        print("unrecognized key: ", c) | null |
156,275 | import tkinter as tk
import socket
import json
import time
from datetime import timedelta
from select import select
from tkinter import ttk
from view_model import *
from PIL import ImageTk, Image
from sys import platform
import os
SCREEN_TIMEOUT_SECONDS = 60
last_interaction = time.time()
screen_on = True
def screen_sleep():
    """Turn the attached display off via DPMS and record that it is off."""
    # Force the X display into the DPMS "off" power state.
    os.system('xset -display :0 dpms force off')
    global screen_on
    screen_on = False
def processInput(app, input):
    """
    Decode one raw input packet from the click-wheel device and trigger the
    matching navigation handler; any activity wakes the screen.

    Args:
        app: The Tk application (unused in the body).
        input: Indexable packet — presumably [button, button_state, position];
            TODO confirm against the input device protocol.
    """
    global wheel_position, last_button, last_interaction
    position = input[2]
    button = input[0]
    button_state = input[1]
    # --- wheel handling: turn position deltas into up/down presses ---
    if button == 29 and button_state == 0:
        # Touch released: invalidate the stored wheel position.
        wheel_position = -1
    elif wheel_position == -1:
        wheel_position = position
    elif position % 2 != 0:
        # Odd positions are skipped — presumably to halve scroll sensitivity.
        pass
    elif wheel_position <=1 and position > 44:
        # Wrap-around from the start of the wheel to the end.
        onDownPressed()
        wheel_position = position
    elif wheel_position >=44 and position < 1:
        # Wrap-around from the end of the wheel to the start.
        onUpPressed()
        wheel_position = position
    elif abs(wheel_position - position) > 6:
        # Jump too large for a natural scroll: treat as noise and reset.
        wheel_position = -1
    elif wheel_position > position:
        onDownPressed()
        wheel_position = position
    elif wheel_position < position:
        onUpPressed()
        wheel_position = position
    # --- button handling: fire once per press (debounced via last_button) ---
    if button_state == 0:
        last_button = -1
    elif button == last_button:
        pass
    elif button == 7:
        onSelectPressed()
        last_button = button
    elif button == 11:
        onBackPressed()
        last_button = button
    elif button == 10:
        onPlayPressed()
        last_button = button
    elif button == 8:
        onNextPressed()
        last_button = button
    elif button == 9:
        onPrevPressed()
        last_button = button
    # Any input counts as user activity: wake the screen if it had timed out.
    now = time.time()
    if (now - last_interaction > SCREEN_TIMEOUT_SECONDS):
        print("waking")
        screen_wake()
    last_interaction = now
    # app.frames[StartPage].set_list_item(0, "Test")
def render(app, render):
    """Dispatch a render object to the drawing routine matching its type."""
    kind = render.type
    if kind == MENU_RENDER_TYPE:
        render_menu(app, render)
    elif kind == NOW_PLAYING_RENDER:
        render_now_playing(app, render)
    elif kind == SEARCH_RENDER:
        render_search(app, render)
# --- application bootstrap: build the root page and start the Tk loop ---
page = RootPage(None)
app = tkinterApp()
render(app, page.render())
# Toggle override-redirect once — presumably to force the window frontmost
# on startup; TODO confirm on target platform.
app.overrideredirect(True)
app.overrideredirect(False)
socket_list = [sock]  # NOTE(review): `sock` is not defined in this snippet
loop_count = 0
app.bind('<KeyPress>', onKeyPress)
# NOTE(review): app_main_loop is defined below this statement in this
# snippet; this ordering only runs if the def precedes it in the real module.
app.after(5, app_main_loop)
app.mainloop()
def app_main_loop():
    """
    Polling tick scheduled via Tk's `after`: drain the input socket, and
    every ~300 ticks refresh the screen / apply the screen-sleep timeout.
    """
    global app, page, loop_count, last_interaction, screen_on
    try:
        # Non-blocking select: only read sockets that have data ready.
        read_sockets = select(socket_list, [], [], 0)[0]
        for socket in read_sockets:
            data = socket.recv(128)
            processInput(app, data)
        loop_count += 1
        if (loop_count >= 300):
            if (time.time() - last_interaction > SCREEN_TIMEOUT_SECONDS and screen_on):
                screen_sleep()
            render(app, page.render())
            loop_count = 0
    # NOTE(review): bare except silently swallows ALL errors (including
    # bugs in processInput/render); consider logging the exception.
    except:
        pass
    finally:
        # Re-schedule this tick regardless of errors.
        app.after(2, app_main_loop) | null |
156,276 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
class UserTrack():
    """Lightweight record of a Spotify track (slots keep instances small)."""

    __slots__ = ['title', 'artist', 'album', 'uri']

    def __init__(self, title, artist, album, uri):
        self.title = title
        self.artist = artist
        self.album = album
        self.uri = uri

    def __str__(self):
        parts = (self.title, self.artist, self.album)
        return " - ".join(parts)
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
pageSize = 50
def get_album_tracks(id):
    """
    Fetch every track of the given playlist id as UserTrack objects.

    NOTE(review): despite the name, this calls sp.playlist_tracks — it
    appears to operate on playlists, not albums; confirm at call sites.
    """
    tracks = []
    results = sp.playlist_tracks(id, limit=pageSize)
    while(results['next']):
        for _, item in enumerate(results['items']):
            track = item['track']
            tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
        results = sp.next(results)
    # Process the final page (the loop above exits before handling it).
    for _, item in enumerate(results['items']):
        track = item['track']
        tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
    return tracks | null |
156,277 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
class UserTrack():
def __init__(self, title, artist, album, uri):
def __str__(self):
class UserArtist():
def __init__(self, name, uri):
def __str__(self):
class UserPlaylist():
def __init__(self, name, idx, uri, track_count):
def __str__(self):
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
pageSize = 50
def get_playlist_tracks(id):
def refresh_devices():
def parse_album(album):
def parse_show(show):
def refresh_data():
    """
    Rebuild the entire DATASTORE from the Spotify Web API: saved tracks,
    followed artists, playlists, saved albums, new releases, saved shows,
    and finally the available playback devices.
    """
    DATASTORE.clear()
    # --- saved ("liked") tracks, paged pageSize at a time ---
    results = sp.current_user_saved_tracks(limit=pageSize, offset=0)
    while(results['next']):
        offset = results['offset']
        for idx, item in enumerate(results['items']):
            track = item['track']
            DATASTORE.setSavedTrack(idx + offset, UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
        results = sp.next(results)
    # Final page (the while-loop exits before processing it).
    offset = results['offset']
    for idx, item in enumerate(results['items']):
        track = item['track']
        DATASTORE.setSavedTrack(idx + offset, UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
    print("Spotify tracks fetched")
    # --- followed artists (cursor-based paging; offset tracked manually) ---
    offset = 0
    results = sp.current_user_followed_artists(limit=pageSize)
    while(results['artists']['next']):
        for idx, item in enumerate(results['artists']['items']):
            DATASTORE.setArtist(idx + offset, UserArtist(item['name'], item['uri']))
        results = sp.next(results['artists'])
        offset = offset + pageSize
    for idx, item in enumerate(results['artists']['items']):
        DATASTORE.setArtist(idx + offset, UserArtist(item['name'], item['uri']))
    print("Spotify artists fetched: " + str(DATASTORE.getArtistCount()))
    # --- playlists (totalindex preserves overall ordering across pages) ---
    results = sp.current_user_playlists(limit=pageSize)
    totalindex = 0 # variable to preserve playlist sort index when calling offset loop down below
    while(results['next']):
        offset = results['offset']
        for idx, item in enumerate(results['items']):
            tracks = get_playlist_tracks(item['id'])
            DATASTORE.setPlaylist(UserPlaylist(item['name'], totalindex, item['uri'], len(tracks)), tracks, index=idx + offset)
            totalindex = totalindex + 1
        results = sp.next(results)
    offset = results['offset']
    for idx, item in enumerate(results['items']):
        tracks = get_playlist_tracks(item['id'])
        DATASTORE.setPlaylist(UserPlaylist(item['name'], totalindex, item['uri'], len(tracks)), tracks, index=idx + offset)
        totalindex = totalindex + 1
    print("Spotify playlists fetched: " + str(DATASTORE.getPlaylistCount()))
    # --- saved albums ---
    results = sp.current_user_saved_albums(limit=pageSize)
    while(results['next']):
        offset = results['offset']
        for idx, item in enumerate(results['items']):
            album, tracks = parse_album(item['album'])
            DATASTORE.setAlbum(album, tracks, index=idx + offset)
        results = sp.next(results)
    offset = results['offset']
    for idx, item in enumerate(results['items']):
        album, tracks = parse_album(item['album'])
        DATASTORE.setAlbum(album, tracks, index=idx + offset)
    print("Refreshed user albums")
    # --- new releases (first page only) ---
    results = sp.new_releases(limit=pageSize)
    for idx, item in enumerate(results['albums']['items']):
        album, tracks = parse_album(item)
        DATASTORE.setNewRelease(album, tracks, index=idx)
    print("Refreshed new releases")
    # --- saved podcasts/shows (first page only) ---
    results = sp.current_user_saved_shows(limit=pageSize)
    if(len(results['items']) > 0):
        offset = results['offset']
        for idx, item in enumerate(results['items']):
            show, episodes = parse_show(item['show'])
            DATASTORE.setShow(show, episodes, index=idx)
    print("Spotify Shows fetched")
    refresh_devices()
    print("Refreshed devices") | null |
156,278 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def refresh_now_playing():
DATASTORE.now_playing = get_now_playing()
def play_artist(artist_uri, device_id = None):
    """
    Start playback of an artist context and refresh now-playing state.

    Args:
        artist_uri: Spotify URI of the artist context to play.
        device_id: Target device id; defaults to the first saved device.
    """
    if (not device_id):
        devices = DATASTORE.getAllSavedDevices()
        if (len(devices) == 0):
            print("error! no devices")
            return
        # Fall back to the first known device.
        device_id = devices[0].id
    response = sp.start_playback(device_id=device_id, context_uri=artist_uri)
    refresh_now_playing()
    print(response) | null |
156,279 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def play_track(track_uri, device_id = None):
    """
    Play a single track by URI.

    Args:
        track_uri: Spotify URI of the track.
        device_id: Target device id; defaults to the first saved device.
    """
    if (not device_id):
        devices = DATASTORE.getAllSavedDevices()
        if (len(devices) == 0):
            print("error! no devices")
            return
        # Fall back to the first known device.
        device_id = devices[0].id
    sp.start_playback(device_id=device_id, uris=[track_uri]) | null |
156,280 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def play_episode(episode_uri, device_id = None):
    """
    Play a single podcast episode by URI.

    Args:
        episode_uri: Spotify URI of the episode.
        device_id: Target device id; defaults to the first saved device.
    """
    if(not device_id):
        devices = DATASTORE.getAllSavedDevices()
        if(len(devices) == 0):
            print("error! no devices")
            return
        # Fall back to the first known device.
        device_id = devices[0].id
    sp.start_playback(device_id=device_id, uris=[episode_uri]) | null |
156,281 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def refresh_now_playing():
DATASTORE.now_playing = get_now_playing()
def play_from_playlist(playist_uri, track_uri, device_id = None):
    """
    Start a playlist context at a specific track, then refresh now-playing.

    Args:
        playist_uri: Playlist context URI (NOTE(review): parameter name has
            a typo, "playist"; kept as-is since callers pass it by position).
        track_uri: URI of the track to start from within the playlist.
        device_id: Target device id; defaults to the first saved device.
    """
    print("playing ", playist_uri, track_uri)
    if (not device_id):
        devices = DATASTORE.getAllSavedDevices()
        if (len(devices) == 0):
            print("error! no devices")
            return
        # Fall back to the first known device.
        device_id = devices[0].id
    sp.start_playback(device_id=device_id, context_uri=playist_uri, offset={"uri": track_uri})
    refresh_now_playing() | null |
156,282 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def refresh_now_playing():
DATASTORE.now_playing = get_now_playing()
def play_from_show(show_uri, episode_uri, device_id = None):
    """
    Start a show context at a specific episode, then refresh now-playing.

    Args:
        show_uri: Show (podcast) context URI.
        episode_uri: URI of the episode to start from.
        device_id: Target device id; defaults to the first saved device.
    """
    print("playing ", show_uri, episode_uri)
    if(not device_id):
        devices = DATASTORE.getAllSavedDevices()
        if (len(devices) == 0):
            print("error! no devices")
            return
        # Fall back to the first known device.
        device_id = devices[0].id
    sp.start_playback(device_id=device_id, context_uri=show_uri, offset={"uri": episode_uri})
    refresh_now_playing() | null |
156,283 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
class UserTrack():
    """Immutable-shape record of a track: title, artist, album and URI."""

    __slots__ = ['title', 'artist', 'album', 'uri']

    def __init__(self, title, artist, album, uri):
        self.title = title
        self.artist = artist
        self.album = album
        self.uri = uri

    def __str__(self):
        return " - ".join([self.title, self.artist, self.album])
class UserArtist():
    """A followed artist: display name plus Spotify URI."""

    __slots__ = ['name', 'uri']

    def __init__(self, name, uri):
        self.name = name
        self.uri = uri

    def __str__(self):
        # Display form is just the artist's name.
        return self.name
class SearchResults():
    """Bundle of search hits: tracks, artists, albums, and a map from
    album URI to that album's track list."""

    __slots__ = ['tracks', 'artists', 'albums', 'album_track_map']

    def __init__(self, tracks, artists, albums, album_track_map):
        self.tracks, self.artists = tracks, artists
        self.albums, self.album_track_map = albums, album_track_map
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def parse_album(album):
    """Convert a Spotify album payload into (UserAlbum, [UserTrack]).

    Falls back to fetching the full album when the payload (e.g. a search
    result) carries no track listing.
    """
    artist = album['artists'][0]['name']
    if 'tracks' not in album :
        # Search results omit track listings; fetch the complete album.
        return get_album(album['id'])
    tracks = [
        UserTrack(track['name'], artist, album['name'], track['uri'])
        for track in album['tracks']['items']
    ]
    return (UserAlbum(album['name'], artist, len(tracks), album['uri']), tracks)
def search(query):
    """
    Query Spotify for tracks, artists and albums (5 of each).

    Returns:
        SearchResults: matched tracks/artists/albums plus a map from each
        album URI to its track list.
    """
    track_results = sp.search(query, limit=5, type='track')
    tracks = []
    for _, item in enumerate(track_results['tracks']['items']):
        tracks.append(UserTrack(item['name'], item['artists'][0]['name'], item['album']['name'], item['uri']))
    artist_results = sp.search(query, limit=5, type='artist')
    artists = []
    for _, item in enumerate(artist_results['artists']['items']):
        artists.append(UserArtist(item['name'], item['uri']))
    album_results = sp.search(query, limit=5, type='album')
    albums = []
    album_track_map = {}
    for _, item in enumerate(album_results['albums']['items']):
        # parse_album may trigger an extra API call per album (see its docs).
        album, album_tracks = parse_album(item)
        albums.append(album)
        album_track_map[album.uri] = album_tracks
    return SearchResults(tracks, artists, albums, album_track_map) | null |
156,284 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def refresh_now_playing():
DATASTORE.now_playing = get_now_playing()
sleep_time = 0.3
def play_next():
    """Skip to the next track and refresh now-playing state."""
    global sleep_time
    sp.next_track()
    # Reset the poll backoff so the UI reflects the change quickly.
    sleep_time = 0.4
    refresh_now_playing() | null |
156,285 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
def refresh_now_playing():
sleep_time = 0.3
def play_previous():
    """Skip to the previous track and refresh now-playing state."""
    global sleep_time
    sp.previous_track()
    # Reset the poll backoff so the UI reflects the change quickly.
    sleep_time = 0.4
    refresh_now_playing() | null |
156,286 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
DATASTORE = datastore.Datastore()
def pause():
    """Pause playback on the active device, then refresh now-playing."""
    global sleep_time
    sp.pause_playback()
    # Shorten the poll backoff so the UI catches the change quickly.
    sleep_time = 0.4
    refresh_now_playing()
def resume():
    """Resume playback on the active device, then refresh now-playing."""
    global sleep_time
    sp.start_playback()
    # Shorten the poll backoff so the UI catches the change quickly.
    sleep_time = 0.4
    refresh_now_playing()
def toggle_play():
    """Pause if something is currently playing, otherwise resume."""
    now_playing = DATASTORE.now_playing
    if not now_playing:
        # No known playback state: nothing to toggle.
        return
    if now_playing['is_playing']:
        pause()
    else:
        resume() | null |
156,287 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
def refresh_now_playing():
DATASTORE.now_playing = get_now_playing()
sleep_time = 0.3
def bg_loop():
    """
    Poll now-playing state forever, doubling the poll interval up to a
    4-second cap; user actions elsewhere reset sleep_time to poll faster.
    """
    global sleep_time
    while True:
        refresh_now_playing()
        time.sleep(sleep_time)
        sleep_time = min(4, sleep_time * 2) | null |
156,288 | import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
def run_async(fun):
    """Run `fun` on a new background thread (fire-and-forget)."""
    threading.Thread(target=fun, args=()).start() | null |
156,289 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
settings = Settings()
def handle_animations(scene: Scene) -> None:
class Add(GitSimBaseCommand):
def __init__(self, files: List[str]):
def construct(self):
def populate_zones(
self,
firstColumnFileNames,
secondColumnFileNames,
thirdColumnFileNames,
firstColumnArrowMap={},
secondColumnArrowMap={},
thirdColumnArrowMap={},
):
def add(
    files: List[str] = typer.Argument(
        default=None,
        help="The names of one or more files to add to Git's staging area",
    )
):
    """Simulate `git add` for the given files and render the animation."""
    from git_sim.add import Add
    # Suppress the first tag in the rendered output for this subcommand.
    settings.hide_first_tag = True
    scene = Add(files=files)
    handle_animations(scene=scene) | null |
156,290 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render `scene` via git_sim.animations inside the configured font context."""
    from git_sim.animations import handle_animations as _render
    with settings.font_context:
        return _render(scene)
def handle_animations(scene: Scene) -> None:
    """
    Render the scene, optionally convert the video to .webm, emit either a
    still image (non-animated mode) or the video path, and auto-open the
    result if configured.
    """
    scene.render()
    # --- optional mp4 -> webm conversion via ffmpeg ---
    if settings.video_format == VideoFormat.WEBM:
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    # --- non-animated mode: grab the first video frame as a still image ---
    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # Image is named after the invoking subcommand (two frames up).
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            # Sharpen, then derive an alpha channel by thresholding luminance.
            if settings.transparent_bg:
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    # --- auto-open the produced media in the OS default viewer ---
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Branch(GitSimBaseCommand):
    """git-sim `branch` subcommand: draw a new branch label at HEAD."""

    def __init__(self, name: str):
        super().__init__()
        # Name of the branch to visualize.
        self.name = name

    def construct(self):
        """Build the animation: draw the commit graph, then the branch ref box."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(f"{settings.INFO_STRING} {type(self).__name__.lower()} {self.name}")
        self.show_intro()
        self.parse_commits()
        self.parse_all()
        self.center_frame_on_commit(self.get_commit())
        branchText = m.Text(
            self.name,
            font=self.font,
            font_size=20,
            color=self.fontColor,
        )
        # Box sized to the rendered text plus padding.
        branchRec = m.Rectangle(
            color=m.GREEN,
            fill_color=m.GREEN,
            fill_opacity=0.25,
            height=0.4,
            width=branchText.width + 0.25,
        )
        # Place the label just above the topmost ref and center the text in it.
        branchRec.next_to(self.topref, m.UP)
        branchText.move_to(branchRec.get_center())
        fullbranch = m.VGroup(branchRec, branchText)
        if settings.animate:
            self.play(m.Create(fullbranch), run_time=1 / settings.speed)
        else:
            self.add(fullbranch)
        self.toFadeOut.add(branchRec, branchText)
        self.drawnRefs[self.name] = fullbranch
        self.recenter_frame()
        self.scale_frame()
        self.color_by()
        self.fadeout()
        self.show_outro()
def branch(
    name: str = typer.Argument(
        ...,
        help="The name of the new branch",
    )
):
    """Simulate `git branch <name>` and render the animation."""
    from git_sim.branch import Branch
    scene = Branch(name=name)
    handle_animations(scene=scene) | null |
156,291 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render `scene` via git_sim.animations inside the configured font context."""
    from git_sim.animations import handle_animations as _render
    with settings.font_context:
        return _render(scene)
def handle_animations(scene: Scene) -> None:
    """
    Render the scene, optionally convert the video to .webm, emit either a
    still image (non-animated mode) or the video path, and auto-open the
    result if configured.
    """
    scene.render()
    # --- optional mp4 -> webm conversion via ffmpeg ---
    if settings.video_format == VideoFormat.WEBM:
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    # --- non-animated mode: grab the first video frame as a still image ---
    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # Image is named after the invoking subcommand (two frames up).
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            # Sharpen, then derive an alpha channel by thresholding luminance.
            if settings.transparent_bg:
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    # --- auto-open the produced media in the OS default viewer ---
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Checkout(GitSimBaseCommand):
    """Simulate `git checkout [-b] <branch>` as an animation.

    Validates the target ref up front and records whether it is an ancestor
    or descendant of HEAD, which determines the drawing strategy used in
    construct().
    """

    def __init__(self, branch: str, b: bool):
        """Validate args and classify the target relative to HEAD.

        branch: name of the branch/ref/commit to check out.
        b: when True, simulate `checkout -b` (create a new branch).
        """
        super().__init__()
        self.branch = branch
        self.b = b

        if self.b:
            # `-b` must create a *new* branch; refuse an existing name.
            if self.branch in self.repo.heads:
                print(
                    "git-sim error: can't create new branch '"
                    + self.branch
                    + "', it already exists"
                )
                sys.exit(1)
        else:
            # The target must resolve to a real Git ref or commit.
            try:
                git.repo.fun.rev_parse(self.repo, self.branch)
            except git.exc.BadName:
                print(
                    "git-sim error: '"
                    + self.branch
                    + "' is not a valid Git ref or identifier."
                )
                sys.exit(1)

            if self.branch == self.repo.active_branch.name:
                print("git-sim error: already on branch '" + self.branch + "'")
                sys.exit(1)

            self.is_ancestor = False
            self.is_descendant = False

            # branch being checked out is behind HEAD
            if self.repo.active_branch.name in self.repo.git.branch(
                "--contains", self.branch
            ):
                self.is_ancestor = True
            # HEAD is behind branch being checked out
            elif self.branch in self.repo.git.branch(
                "--contains", self.repo.active_branch.name
            ):
                self.is_descendant = True

        # Highlight the branches involved in the drawing.
        if self.branch in [branch.name for branch in self.repo.heads]:
            self.selected_branches.append(self.branch)

        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: repo.active_branch raises TypeError.
            pass

    def construct(self):
        """Draw the commit graph and animate the HEAD/branch ref moves."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()}{' -b' if self.b else ''} {self.branch}"
            )

        self.show_intro()
        head_commit = self.get_commit()

        # using -b flag, create new branch label and exit
        if self.b:
            self.parse_commits(head_commit)
            self.recenter_frame()
            self.scale_frame()
            self.draw_ref(head_commit, self.topref, text=self.branch, color=m.GREEN)
        else:
            branch_commit = self.get_commit(self.branch)

            if self.is_ancestor:
                commits_in_range = list(self.repo.iter_commits(self.branch + "..HEAD"))
                # branch is reached from HEAD, so draw everything
                if len(commits_in_range) <= self.n:
                    self.parse_commits(head_commit)
                    reset_head_to = branch_commit.hexsha
                    self.recenter_frame()
                    self.scale_frame()
                    self.reset_head(reset_head_to)
                    self.reset_branch(head_commit.hexsha)
                # branch is not reached, so start from branch
                else:
                    self.parse_commits(branch_commit)
                    self.draw_ref(branch_commit, self.topref)
                    self.recenter_frame()
                    self.scale_frame()
            elif self.is_descendant:
                self.parse_commits(branch_commit)
                reset_head_to = branch_commit.hexsha
                self.recenter_frame()
                self.scale_frame()
                if "HEAD" in self.drawnRefs:
                    self.reset_head(reset_head_to)
                    self.reset_branch(head_commit.hexsha)
                else:
                    self.draw_ref(branch_commit, self.topref)
            else:
                # Divergent histories: draw both chains, shifted apart.
                self.parse_commits(head_commit)
                self.parse_commits(branch_commit, shift=4 * m.DOWN)
                self.center_frame_on_commit(branch_commit)
                self.recenter_frame()
                self.scale_frame()
                self.reset_head(branch_commit.hexsha)
                self.reset_branch(head_commit.hexsha)

        self.color_by()
        self.fadeout()
        self.show_outro()
def checkout(
    branch: str = typer.Argument(
        ...,
        help="The name of the branch to checkout",
    ),
    b: bool = typer.Option(
        False,
        "-b",
        help="Create the specified branch if it doesn't already exist",
    ),
):
    """CLI entry point: build a Checkout scene and render it."""
    from git_sim.checkout import Checkout

    handle_animations(scene=Checkout(branch=branch, b=b))
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* via git_sim.animations with the configured font active."""
    from git_sim.animations import handle_animations as render

    with settings.font_context:
        return render(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process its output.

    After rendering, this optionally converts the video to WEBM, extracts a
    still image when animation is disabled, prints or streams the output
    location per the stdout/output_only_path/quiet settings, and finally
    auto-opens the media if requested.
    """
    scene.render()

    if settings.video_format == VideoFormat.WEBM:
        movie_file_path = str(scene.renderer.file_writer.movie_file_path)
        # Swap the ".mp4" extension for ".webm".
        webm_file_path = os.path.splitext(movie_file_path)[0] + ".webm"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion. Fixed: use an argument list (shell=False)
        # so paths containing spaces or shell metacharacters are safe.
        p = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-i",
                movie_file_path,
                "-hide_banner",
                "-loglevel",
                "error",
                "-c:v",
                "libvpx-vp9",
                "-crf",
                "50",
                "-b:v",
                "0",
                "-b:a",
                "128k",
                "-c:a",
                "libopus",
                webm_file_path,
            ]
        )
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path

    # Set only once a still image has actually been written; checked before
    # auto-opening so a failed frame grab no longer raises NameError.
    image_file_path = None

    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # inspect.stack()[2] is the CLI subcommand two frames up; its
            # name labels the output file.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Sharpen, then derive an alpha mask by thresholding the
                # grayscale image (inverted threshold in light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)

    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # Guard: the frame grab above may have failed, in which case
                # there is no image file to open.
                if image_file_path is not None:
                    open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class CherryPick(GitSimBaseCommand):
    """Simulate `git cherry-pick <commit> [-e <msg>]` as an animation."""

    def __init__(self, commit: str, edit: str):
        """Validate the commit ref and record branches to highlight.

        commit: ref/commit to cherry-pick onto the active branch.
        edit: optional replacement commit message (None to keep original).
        """
        super().__init__()
        self.commit = commit
        self.edit = edit

        # The target must resolve to a real Git ref or commit.
        try:
            git.repo.fun.rev_parse(self.repo, self.commit)
        except git.exc.BadName:
            print(
                "git-sim error: '"
                + self.commit
                + "' is not a valid Git ref or identifier."
            )
            sys.exit(1)

        if self.commit in [branch.name for branch in self.repo.heads]:
            self.selected_branches.append(self.commit)

        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: repo.active_branch raises TypeError.
            pass

    def construct(self):
        """Draw both histories and animate the simulated cherry-pick."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING} cherry-pick {self.commit}"
                + ((' -e "' + self.edit + '"') if self.edit else "")
            )

        # Cherry-picking a commit already contained in HEAD's history is a no-op.
        if self.repo.active_branch.name in self.repo.git.branch(
            "--contains", self.commit
        ):
            print(
                "git-sim error: Commit '"
                + self.commit
                + "' is already included in the history of active branch '"
                + self.repo.active_branch.name
                + "'."
            )
            sys.exit(1)

        self.show_intro()
        head_commit = self.get_commit()
        self.parse_commits(head_commit)
        cherry_picked_commit = self.get_commit(self.commit)
        self.parse_commits(cherry_picked_commit, shift=4 * m.DOWN)
        self.parse_all()
        self.center_frame_on_commit(head_commit)
        # "abcdef" is the placeholder hexsha used for the simulated new commit.
        self.setup_and_draw_parent(
            head_commit,
            self.edit if self.edit else cherry_picked_commit.message,
        )
        self.draw_arrow_between_commits(cherry_picked_commit.hexsha, "abcdef")
        self.recenter_frame()
        self.scale_frame()
        self.reset_head_branch("abcdef")
        self.color_by(offset=2)
        self.fadeout()
        self.show_outro()
def cherry_pick(
    commit: str = typer.Argument(
        ...,
        help="The ref (branch/tag), or commit ID to simulate cherry-pick onto active branch",
    ),
    edit: str = typer.Option(
        None,
        "--edit",
        "-e",
        help="Specify a new commit message for the cherry-picked commit",
    ),
):
    """CLI entry point: build a CherryPick scene and render it."""
    from git_sim.cherrypick import CherryPick

    handle_animations(scene=CherryPick(commit=commit, edit=edit))
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
settings = Settings()
def handle_animations(scene: Scene) -> None:
class Clean(GitSimBaseCommand):
def __init__(self):
def construct(self):
def create_zone_text(
self,
firstColumnFileNames,
secondColumnFileNames,
thirdColumnFileNames,
firstColumnFiles,
secondColumnFiles,
thirdColumnFiles,
firstColumnFilesDict,
secondColumnFilesDict,
thirdColumnFilesDict,
firstColumnTitle,
secondColumnTitle,
thirdColumnTitle,
horizontal2,
):
def populate_zones(
self,
firstColumnFileNames,
secondColumnFileNames,
thirdColumnFileNames,
firstColumnArrowMap={},
secondColumnArrowMap={},
thirdColumnArrowMap={},
):
def clean():
    """CLI entry point: simulate `git clean` and render the animation."""
    from git_sim.clean import Clean

    settings.hide_first_tag = True
    handle_animations(scene=Clean())
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* via git_sim.animations with the configured font active."""
    from git_sim.animations import handle_animations as render

    with settings.font_context:
        return render(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process its output.

    After rendering, this optionally converts the video to WEBM, extracts a
    still image when animation is disabled, prints or streams the output
    location per the stdout/output_only_path/quiet settings, and finally
    auto-opens the media if requested.
    """
    scene.render()

    if settings.video_format == VideoFormat.WEBM:
        movie_file_path = str(scene.renderer.file_writer.movie_file_path)
        # Swap the ".mp4" extension for ".webm".
        webm_file_path = os.path.splitext(movie_file_path)[0] + ".webm"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion. Fixed: use an argument list (shell=False)
        # so paths containing spaces or shell metacharacters are safe.
        p = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-i",
                movie_file_path,
                "-hide_banner",
                "-loglevel",
                "error",
                "-c:v",
                "libvpx-vp9",
                "-crf",
                "50",
                "-b:v",
                "0",
                "-b:a",
                "128k",
                "-c:a",
                "libopus",
                webm_file_path,
            ]
        )
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path

    # Set only once a still image has actually been written; checked before
    # auto-opening so a failed frame grab no longer raises NameError.
    image_file_path = None

    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # inspect.stack()[2] is the CLI subcommand two frames up; its
            # name labels the output file.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Sharpen, then derive an alpha mask by thresholding the
                # grayscale image (inverted threshold in light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)

    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # Guard: the frame grab above may have failed, in which case
                # there is no image file to open.
                if image_file_path is not None:
                    open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Clone(GitSimBaseCommand):
    """Simulate `git clone <url>`: actually clones into a temp dir, draws the
    resulting history, then deletes the temporary clone."""

    # Override since 'clone' subcommand shouldn't require repo to exist
    def init_repo(self):
        pass

    def __init__(self, url: str):
        """url: web URL or filesystem path of the repo to clone."""
        super().__init__()
        self.url = url
        # Keep the drawing compact: at most two branch labels per commit.
        settings.max_branches_per_commit = 2

    def construct(self):
        """Clone the repo, draw its log, and clean up the temp clone."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.url}")

        self.show_intro()

        # Configure paths to make local clone to run networked commands in
        # (repo name = last path component, minus a trailing ".git").
        repo_name = re.search(r"/([^/]+)/?$", self.url)
        if repo_name:
            repo_name = repo_name.group(1)
            if repo_name.endswith(".git"):
                repo_name = repo_name[:-4]
        else:
            print(
                f"git-sim error: Invalid repo URL, please confirm repo URL and try again"
            )
            sys.exit(1)
        new_dir = os.path.join(tempfile.gettempdir(), "git_sim", repo_name)

        # Create local clone of local repo
        try:
            self.repo = git.Repo.clone_from(self.url, new_dir, no_hardlinks=True)
        except git.GitCommandError as e:
            print(
                f"git-sim error: Invalid repo URL, please confirm repo URL and try again"
            )
            sys.exit(1)

        head_commit = self.get_commit()
        self.parse_commits(head_commit)
        self.recenter_frame()
        self.scale_frame()
        self.add_details(repo_name)
        self.color_by()
        self.fadeout()
        self.show_outro()

        # Unlink the program from the filesystem
        self.repo.git.clear_cache()

        # Delete the local clones
        shutil.rmtree(new_dir, onerror=self.del_rw)

    def add_details(self, repo_name):
        """Overlay the success message and log caption above the drawing."""
        text1 = m.Text(
            f"Successfully cloned from {self.url} into ./{repo_name}",
            font=self.font,
            font_size=20,
            color=self.fontColor,
            weight=m.BOLD,
        )
        text1.move_to([self.camera.frame.get_center()[0], 4, 0])

        text2 = m.Text(
            f"Cloned repo log:",
            font=self.font,
            font_size=20,
            color=self.fontColor,
            weight=m.BOLD,
        )
        text2.move_to(text1.get_center()).shift(m.DOWN / 2)

        self.toFadeOut.add(text1)
        self.toFadeOut.add(text2)
        self.recenter_frame()
        self.scale_frame()

        if settings.animate:
            self.play(m.AddTextLetterByLetter(text1), m.AddTextLetterByLetter(text2))
        else:
            self.add(text1, text2)
def clone(
    url: str = typer.Argument(
        ...,
        help="The web URL or filesystem path of the Git repo to clone",
    ),
):
    """CLI entry point: build a Clone scene and render it."""
    from git_sim.clone import Clone

    handle_animations(scene=Clone(url=url))
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* via git_sim.animations with the configured font active."""
    from git_sim.animations import handle_animations as render

    with settings.font_context:
        return render(scene)
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process its output.

    After rendering, this optionally converts the video to WEBM, extracts a
    still image when animation is disabled, prints or streams the output
    location per the stdout/output_only_path/quiet settings, and finally
    auto-opens the media if requested.
    """
    scene.render()

    if settings.video_format == VideoFormat.WEBM:
        movie_file_path = str(scene.renderer.file_writer.movie_file_path)
        # Swap the ".mp4" extension for ".webm".
        webm_file_path = os.path.splitext(movie_file_path)[0] + ".webm"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion. Fixed: use an argument list (shell=False)
        # so paths containing spaces or shell metacharacters are safe.
        p = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-i",
                movie_file_path,
                "-hide_banner",
                "-loglevel",
                "error",
                "-c:v",
                "libvpx-vp9",
                "-crf",
                "50",
                "-b:v",
                "0",
                "-b:a",
                "128k",
                "-c:a",
                "libopus",
                webm_file_path,
            ]
        )
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path

    # Set only once a still image has actually been written; checked before
    # auto-opening so a failed frame grab no longer raises NameError.
    image_file_path = None

    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # inspect.stack()[2] is the CLI subcommand two frames up; its
            # name labels the output file.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Sharpen, then derive an alpha mask by thresholding the
                # grayscale image (inverted threshold in light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)

    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # Guard: the frame grab above may have failed, in which case
                # there is no image file to open.
                if image_file_path is not None:
                    open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Commit(GitSimBaseCommand):
    """Simulate `git commit [-m <msg>] [--amend]` as an animation."""

    def __init__(self, message: str, amend: bool):
        """message: the commit message; amend: simulate `--amend` (requires -m)."""
        super().__init__()
        self.message = message
        self.amend = amend

        # Amending draws one extra commit, hence the larger default depth.
        self.n_default = 4 if not self.amend else 5
        self.n = self.n_default

        self.hide_first_tag = True
        settings.hide_merged_branches = True

        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: repo.active_branch raises TypeError.
            pass

        # "New commit" is the -m default; --amend without an explicit -m is an error.
        if self.amend and self.message == "New commit":
            print(
                "git-sim error: The --amend flag must be used with the -m flag to specify the amended commit message."
            )
            sys.exit(1)

    def construct(self):
        """Draw the history plus the new/amended commit and the file zones."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {'--amend ' if self.amend else ''}"
                + '-m "'
                + self.message
                + '"'
            )

        self.show_intro()
        head_commit = self.get_commit()

        if self.amend:
            # Build an in-memory commit with the new message; nothing is
            # written to the repo's refs.
            tree = self.repo.tree()
            amended = git.Commit.create_from_tree(
                self.repo,
                tree,
                self.message,
            )
            head_commit = amended

        self.parse_commits(head_commit)
        self.center_frame_on_commit(head_commit)

        if not self.amend:
            self.setup_and_draw_parent(head_commit, self.message)
        else:
            self.draw_ref(head_commit, self.drawnCommitIds[amended.hexsha])
            self.draw_ref(
                head_commit,
                self.drawnRefs["HEAD"],
                text=self.repo.active_branch.name,
                color=m.GREEN,
            )

        self.recenter_frame()
        self.scale_frame()

        if not self.amend:
            # "abcdef" is the placeholder hexsha for the simulated new commit.
            self.reset_head_branch("abcdef")
            self.vsplit_frame()
            self.setup_and_draw_zones(
                first_column_name="Working directory",
                second_column_name="Staging area",
                third_column_name="New commit",
            )

        self.fadeout()
        self.show_outro()

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap=None,
        secondColumnArrowMap=None,
        thirdColumnArrowMap=None,
    ):
        """Fill the three zone columns with changed file names.

        Fixed: the arrow-map defaults were mutable dicts that this method
        mutates, so state leaked between calls; use None sentinels instead
        (call-compatible with the previous `={}` defaults).
        """
        if firstColumnArrowMap is None:
            firstColumnArrowMap = {}
        if secondColumnArrowMap is None:
            secondColumnArrowMap = {}
        if thirdColumnArrowMap is None:
            thirdColumnArrowMap = {}

        # Unstaged changes -> working directory column.
        for x in self.repo.index.diff(None):
            if "git-sim_media" not in x.a_path:
                firstColumnFileNames.add(x.a_path)

        # Staged changes -> staging area and new-commit columns, with an
        # arrow from staging to the new commit.
        for y in self.repo.index.diff("HEAD"):
            if "git-sim_media" not in y.a_path:
                secondColumnFileNames.add(y.a_path)
                thirdColumnFileNames.add(y.a_path)
                secondColumnArrowMap[y.a_path] = m.Arrow(
                    stroke_width=3, color=self.fontColor
                )
def commit(
    message: str = typer.Option(
        "New commit",
        "--message",
        "-m",
        help="The commit message of the new commit",
    ),
    amend: bool = typer.Option(
        default=False,
        help="Amend the last commit message, must be used with the --message flag",
    ),
):
    """CLI entry point: build a Commit scene and render it."""
    from git_sim.commit import Commit

    settings.hide_first_tag = True
    handle_animations(scene=Commit(message=message, amend=amend))
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* via git_sim.animations with the configured font active."""
    from git_sim.animations import handle_animations as render

    with settings.font_context:
        return render(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process its output.

    After rendering, this optionally converts the video to WEBM, extracts a
    still image when animation is disabled, prints or streams the output
    location per the stdout/output_only_path/quiet settings, and finally
    auto-opens the media if requested.
    """
    scene.render()

    if settings.video_format == VideoFormat.WEBM:
        movie_file_path = str(scene.renderer.file_writer.movie_file_path)
        # Swap the ".mp4" extension for ".webm".
        webm_file_path = os.path.splitext(movie_file_path)[0] + ".webm"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion. Fixed: use an argument list (shell=False)
        # so paths containing spaces or shell metacharacters are safe.
        p = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-i",
                movie_file_path,
                "-hide_banner",
                "-loglevel",
                "error",
                "-c:v",
                "libvpx-vp9",
                "-crf",
                "50",
                "-b:v",
                "0",
                "-b:a",
                "128k",
                "-c:a",
                "libopus",
                webm_file_path,
            ]
        )
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path

    # Set only once a still image has actually been written; checked before
    # auto-opening so a failed frame grab no longer raises NameError.
    image_file_path = None

    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # inspect.stack()[2] is the CLI subcommand two frames up; its
            # name labels the output file.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Sharpen, then derive an alpha mask by thresholding the
                # grayscale image (inverted threshold in light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)

    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # Guard: the frame grab above may have failed, in which case
                # there is no image file to open.
                if image_file_path is not None:
                    open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Fetch(GitSimBaseCommand):
    """Simulate `git fetch [<remote>] [<branch>]`.

    Works in a temporary clone of the local repo so the real repo is never
    modified by the actual fetch performed for the simulation.
    """

    def __init__(self, remote: str, branch: str):
        """remote/branch: names to fetch; either may be None (defaulted later)."""
        super().__init__()
        self.remote = remote
        self.branch = branch
        # Keep the drawing compact: at most two branch labels per commit.
        settings.max_branches_per_commit = 2

        if self.remote and self.remote not in self.repo.remotes:
            print("git-sim error: no remote with name '" + self.remote + "'")
            sys.exit(1)

    def construct(self):
        """Clone locally, fetch for real in the clone, and draw the result."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.remote if self.remote else ''} {self.branch if self.branch else ''}"
            )

        # Default to origin / the active branch, mirroring git's behavior.
        if not self.remote:
            self.remote = "origin"
        if not self.branch:
            self.branch = self.repo.active_branch.name

        self.show_intro()

        git_root = self.repo.git.rev_parse("--show-toplevel")
        repo_name = os.path.basename(self.repo.working_dir)
        new_dir = os.path.join(tempfile.gettempdir(), "git_sim", repo_name)

        # Clone the local repo into a temp dir, then copy the original
        # remote URLs over so the fetch hits the real remotes.
        orig_remotes = self.repo.remotes
        self.repo = git.Repo.clone_from(git_root, new_dir, no_hardlinks=True)
        for r1 in orig_remotes:
            for r2 in self.repo.remotes:
                if r1.name == r2.name:
                    r2.set_url(r1.url)

        try:
            self.repo.git.fetch(self.remote, self.branch)
        except git.GitCommandError as e:
            print(e)
            sys.exit(1)

        # Decide whether to start drawing from the remote-tracking ref or
        # the local branch, depending on which is ahead.
        # local branch doesn't exist
        if self.branch not in self.repo.heads:
            start_parse_from_remote = True
        # fetched branch is ahead of local branch
        elif (self.remote + "/" + self.branch) in self.repo.git.branch(
            "-r", "--contains", self.branch
        ):
            start_parse_from_remote = True
        # fetched branch is behind local branch
        elif self.branch in self.repo.git.branch(
            "--contains", (self.remote + "/" + self.branch)
        ):
            start_parse_from_remote = False
        else:
            start_parse_from_remote = True

        if start_parse_from_remote:
            commit = self.get_commit(self.remote + "/" + self.branch)
        else:
            commit = self.get_commit(self.branch)

        self.parse_commits(commit)
        self.recenter_frame()
        self.scale_frame()
        self.color_by()
        self.fadeout()
        self.show_outro()

        # Release file handles, then remove the temporary clone.
        self.repo.git.clear_cache()
        shutil.rmtree(new_dir, onerror=self.del_rw)
def fetch(
    remote: str = typer.Argument(
        default=None,
        help="The name of the remote to fetch from",
    ),
    branch: str = typer.Argument(
        default=None,
        help="The name of the branch to fetch",
    ),
):
    """CLI entry point: build a Fetch scene and render it."""
    from git_sim.fetch import Fetch

    handle_animations(scene=Fetch(remote=remote, branch=branch))
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* via git_sim.animations with the configured font active."""
    from git_sim.animations import handle_animations as render

    with settings.font_context:
        return render(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process its output.

    After rendering, this optionally converts the video to WEBM, extracts a
    still image when animation is disabled, prints or streams the output
    location per the stdout/output_only_path/quiet settings, and finally
    auto-opens the media if requested.
    """
    scene.render()

    if settings.video_format == VideoFormat.WEBM:
        movie_file_path = str(scene.renderer.file_writer.movie_file_path)
        # Swap the ".mp4" extension for ".webm".
        webm_file_path = os.path.splitext(movie_file_path)[0] + ".webm"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion. Fixed: use an argument list (shell=False)
        # so paths containing spaces or shell metacharacters are safe.
        p = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-i",
                movie_file_path,
                "-hide_banner",
                "-loglevel",
                "error",
                "-c:v",
                "libvpx-vp9",
                "-crf",
                "50",
                "-b:v",
                "0",
                "-b:a",
                "128k",
                "-c:a",
                "libopus",
                webm_file_path,
            ]
        )
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path

    # Set only once a still image has actually been written; checked before
    # auto-opening so a failed frame grab no longer raises NameError.
    image_file_path = None

    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # inspect.stack()[2] is the CLI subcommand two frames up; its
            # name labels the output file.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Sharpen, then derive an alpha mask by thresholding the
                # grayscale image (inverted threshold in light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)

    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # Guard: the frame grab above may have failed, in which case
                # there is no image file to open.
                if image_file_path is not None:
                    open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Log(GitSimBaseCommand):
    """Simulate `git log [--all] [-n <count>]` as an animation."""

    def __init__(self, ctx: typer.Context, n: int, all: bool):
        """Resolve -n/--all from the subcommand, falling back to the parent
        command's values when the subcommand did not supply them."""
        super().__init__()
        self.n_subcommand = n
        # Subcommand value wins; otherwise inherit from the parent command.
        self.n = self.n_subcommand if self.n_subcommand else ctx.parent.params.get("n")
        self.n_orig = self.n
        self.all_subcommand = all
        self.all = (
            self.all_subcommand
            if self.all_subcommand
            else ctx.parent.params.get("all")
        )
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: repo.active_branch raises TypeError.
            pass

    def construct(self):
        """Parse the history and draw the log graph."""
        quiet_output = settings.stdout or settings.output_only_path or settings.quiet
        if not quiet_output:
            print(
                f"{settings.INFO_STRING} {type(self).__name__.lower()}{' --all' if self.all_subcommand else ''}{' -n ' + str(self.n) if self.n_subcommand else ''}"
            )
        self.show_intro()
        self.parse_commits()
        self.parse_all()
        self.recenter_frame()
        self.scale_frame()
        self.color_by()
        self.fadeout()
        self.show_outro()
def log(
    ctx: typer.Context,
    n: int = typer.Option(
        None,
        "-n",
        help="Number of commits to display from branch heads",
    ),
    all: bool = typer.Option(
        False,
        "--all",
        help="Display all local branches in the log output",
    ),
):
    """CLI entry point: build a Log scene and render it."""
    from git_sim.log import Log

    handle_animations(scene=Log(ctx=ctx, n=n, all=all))
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* via git_sim.animations with the configured font active."""
    from git_sim.animations import handle_animations as render

    with settings.font_context:
        return render(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process its output.

    After rendering, this optionally converts the video to WEBM, extracts a
    still image when animation is disabled, prints or streams the output
    location per the stdout/output_only_path/quiet settings, and finally
    auto-opens the media if requested.
    """
    scene.render()

    if settings.video_format == VideoFormat.WEBM:
        movie_file_path = str(scene.renderer.file_writer.movie_file_path)
        # Swap the ".mp4" extension for ".webm".
        webm_file_path = os.path.splitext(movie_file_path)[0] + ".webm"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion. Fixed: use an argument list (shell=False)
        # so paths containing spaces or shell metacharacters are safe.
        p = subprocess.Popen(
            [
                "ffmpeg",
                "-y",
                "-i",
                movie_file_path,
                "-hide_banner",
                "-loglevel",
                "error",
                "-c:v",
                "libvpx-vp9",
                "-crf",
                "50",
                "-b:v",
                "0",
                "-b:a",
                "128k",
                "-c:a",
                "libopus",
                webm_file_path,
            ]
        )
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path

    # Set only once a still image has actually been written; checked before
    # auto-opening so a failed frame grab no longer raises NameError.
    image_file_path = None

    if not settings.animate:
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # inspect.stack()[2] is the CLI subcommand two frames up; its
            # name labels the output file.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Sharpen, then derive an alpha mask by thresholding the
                # grayscale image (inverted threshold in light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
            if settings.stdout and not settings.quiet:
                sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)

    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # Guard: the frame grab above may have failed, in which case
                # there is no image file to open.
                if image_file_path is not None:
                    open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Merge(GitSimBaseCommand):
    """Simulate `git merge <branch>` as an animation or still image.

    Covers three cases: fast-forward merges (optionally forced into a
    merge commit with --no-ff), clean two-parent merge commits, and
    merges that would conflict (detected by replaying the merge in a
    throwaway clone).
    """

    def __init__(self, branch: str, no_ff: bool, message: str):
        """Record merge options and validate that *branch* resolves.

        Exits with status 1 if *branch* is not a valid Git ref.
        """
        super().__init__()
        self.branch = branch
        self.no_ff = no_ff
        self.message = message
        try:
            git.repo.fun.rev_parse(self.repo, self.branch)
        except git.exc.BadName:
            print(
                "git-sim error: '"
                + self.branch
                + "' is not a valid Git ref or identifier."
            )
            sys.exit(1)
        # Set True in construct() when the merge resolves as a fast-forward.
        self.ff = False
        if self.branch in [branch.name for branch in self.repo.heads]:
            self.selected_branches.append(self.branch)
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: there is no active branch name.
            pass

    def construct(self):
        """Build the merge scene."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.branch} {'--no-ff' if self.no_ff else ''}"
            )
        # Refuse to merge a branch whose history already contains HEAD's branch.
        if self.repo.active_branch.name in self.repo.git.branch(
            "--contains", self.branch
        ):
            print(
                "git-sim error: Branch '"
                + self.branch
                + "' is already included in the history of active branch '"
                + self.repo.active_branch.name
                + "'."
            )
            sys.exit(1)
        self.show_intro()
        head_commit = self.get_commit()
        branch_commit = self.get_commit(self.branch)
        # Fast-forward is possible when HEAD is an ancestor of the target
        # branch (checked against local or remote-tracking branches).
        if self.branch not in self.get_remote_tracking_branches():
            if self.branch in self.repo.git.branch("--contains", head_commit.hexsha):
                self.ff = True
        else:
            if self.branch in self.repo.git.branch(
                "-r", "--contains", head_commit.hexsha
            ):
                self.ff = True
        if self.ff:
            # Fast-forward case (optionally converted into a merge commit).
            self.parse_commits(branch_commit)
            self.parse_all()
            reset_head_to = branch_commit.hexsha
            shift = numpy.array([0.0, 0.6, 0.0])
            if self.no_ff:
                self.center_frame_on_commit(branch_commit)
                commitId = self.setup_and_draw_parent(branch_commit, self.message)
                # If pre-merge HEAD is on screen, drawn an arrow to it as 2nd parent
                if head_commit.hexsha in self.drawnCommits:
                    # "abcdef" is the placeholder sha used for the simulated commit.
                    start = self.drawnCommits["abcdef"].get_center()
                    end = self.drawnCommits[head_commit.hexsha].get_center()
                    arrow = m.CurvedArrow(
                        start,
                        end,
                        color=self.fontColor,
                        stroke_width=self.arrow_stroke_width,
                        tip_shape=self.arrow_tip_shape,
                    )
                    self.draw_arrow(True, arrow)
                reset_head_to = "abcdef"
                shift = numpy.array([0.0, 0.0, 0.0])
            self.recenter_frame()
            self.scale_frame()
            if "HEAD" in self.drawnRefs and self.no_ff:
                self.reset_head_branch(reset_head_to, shift=shift)
            elif "HEAD" in self.drawnRefs:
                self.reset_head_branch_to_ref(self.topref, shift=shift)
            else:
                self.draw_ref(branch_commit, commitId if self.no_ff else self.topref)
                self.draw_ref(
                    branch_commit,
                    self.drawnRefs["HEAD"],
                    text=self.repo.active_branch.name,
                    color=m.GREEN,
                )
            if self.no_ff:
                self.color_by(offset=2)
            else:
                self.color_by()
        else:
            # True merge: first check in a throwaway clone whether it conflicts.
            merge_result, new_dir = self.check_merge_conflict(
                self.repo.active_branch.name, self.branch
            )
            if merge_result:
                self.hide_first_tag = True
                self.parse_commits(head_commit)
                self.recenter_frame()
                self.scale_frame()
                # Show the conflicted files names in the table/zones
                self.vsplit_frame()
                self.setup_and_draw_zones(
                    first_column_name="----",
                    second_column_name="Conflicted files",
                    third_column_name="----",
                )
                self.color_by()
            else:
                # Clean merge: draw both histories plus a simulated merge
                # commit with arrows to both parents.
                self.parse_commits(head_commit)
                self.parse_commits(branch_commit, shift=4 * m.DOWN)
                self.parse_all()
                self.center_frame_on_commit(head_commit)
                self.setup_and_draw_parent(
                    head_commit,
                    self.message,
                    shift=2 * m.DOWN,
                    draw_arrow=False,
                    color=m.GRAY,
                )
                self.draw_arrow_between_commits("abcdef", branch_commit.hexsha)
                self.draw_arrow_between_commits("abcdef", head_commit.hexsha)
                self.recenter_frame()
                self.scale_frame()
                self.reset_head_branch("abcdef")
                self.color_by(offset=2)
        self.fadeout()
        self.show_outro()
        # Unlink the program from the filesystem
        self.repo.git.clear_cache()
        # Delete the local clone
        try:
            # new_dir is unbound on the fast-forward path, hence UnboundLocalError.
            shutil.rmtree(new_dir, onerror=self.del_rw)
        except (FileNotFoundError, UnboundLocalError):
            pass

    def check_merge_conflict(self, branch1, branch2):
        """Replay merging *branch2* into *branch1* in a temporary clone.

        Returns (1, clone_dir) when the merge conflicts — in that case
        ``self.repo`` is left pointing at the clone and the conflicted
        paths are collected in ``self.conflicted_files`` — otherwise
        (0, clone_dir) with ``self.repo`` restored.
        """
        git_root = self.repo.git.rev_parse("--show-toplevel")
        repo_name = os.path.basename(self.repo.working_dir)
        new_dir = os.path.join(tempfile.gettempdir(), "git_sim", repo_name)
        orig_repo = self.repo
        orig_remotes = self.repo.remotes
        self.repo = git.Repo.clone_from(git_root, new_dir, no_hardlinks=True)
        self.repo.git.checkout(branch2)
        self.repo.git.checkout(branch1)
        try:
            self.repo.git.merge(branch2)
        except git.GitCommandError as e:
            if "CONFLICT" in e.stdout:
                self.conflicted_files = []
                self.n = 5
                # Index entries with stage > 0 are conflicted paths.
                for entry in self.repo.index.entries:
                    if len(entry) == 2 and entry[1] > 0:
                        self.conflicted_files.append(entry[0])
                return 1, new_dir
        self.repo = orig_repo
        return 0, new_dir

    # Override to display conflicted filenames
    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """Fill only the middle zone, with the conflicted file names."""
        for filename in self.conflicted_files:
            secondColumnFileNames.add(filename)
def merge(
    branch: str = typer.Argument(
        ...,
        help="The name of the branch to merge into the active checked-out branch",
    ),
    no_ff: bool = typer.Option(
        False,
        "--no-ff",
        help="Simulate creation of a merge commit in all cases, even when the merge could instead be resolved as a fast-forward",
    ),
    message: str = typer.Option(
        "Merge commit",
        "--message",
        "-m",
        help="The commit message of the new merge commit",
    ),
):
    """CLI entry point: simulate `git merge <branch>`."""
    from git_sim.merge import Merge

    handle_animations(scene=Merge(branch=branch, no_ff=no_ff, message=message))
156,299 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* inside the configured font context.

    The real renderer is imported lazily so the CLI starts fast.
    """
    from git_sim.animations import handle_animations as render_scene

    with settings.font_context:
        return render_scene(scene)
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process the resulting media.

    Steps: render with manim; optionally transcode the .mp4 to .webm
    with ffmpeg; in static (non-animated) mode grab the first frame and
    write it out as an image (optionally with an alpha channel for a
    transparent background); finally report the output path and/or
    auto-open it, honoring the stdout/output_only_path/quiet settings.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Swap the "mp4" suffix for "webm" (the dot is preserved by [:-3]).
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static output: read the first frame of the rendered video.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # Name the image after the CLI subcommand that invoked us
            # (two frames up the call stack) plus a timestamp.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then threshold its grayscale to
                # synthesize an alpha channel (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # Stream the raw JPEG bytes to stdout for piping.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Mv(GitSimBaseCommand):
    """Simulate `git mv <file> <new_file>` (rename/move a tracked file)."""

    def __init__(self, file: str, new_file: str):
        """Validate that *file* is tracked and record the rename target.

        Exits if `git ls-files --error-unmatch` reports *file* untracked.
        """
        super().__init__()
        self.hide_first_tag = True
        self.allow_no_commits = True
        self.file = file
        self.new_file = new_file
        settings.hide_merged_branches = True
        self.n = self.n_default
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: no active branch name.
            pass
        try:
            self.repo.git.ls_files("--error-unmatch", self.file)
        except:
            print(f"git-sim error: No tracked file with name: '{file}'")
            sys.exit()

    def construct(self):
        """Draw the commit history, the three zones, and the rename."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING} {type(self).__name__.lower()} {self.file} {self.new_file}"
            )
        self.show_intro()
        self.parse_commits()
        self.recenter_frame()
        self.scale_frame()
        self.vsplit_frame()
        self.setup_and_draw_zones(
            first_column_name="Working directory",
            second_column_name="Staging area",
            third_column_name="Renamed files",
        )
        self.rename_moved_file()
        self.fadeout()
        self.show_outro()

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """Place the file in the staging or working-dir column (with an
        arrow to the renamed column) depending on whether it is staged."""
        if self.file in [x.a_path for x in self.repo.index.diff("HEAD")]:
            secondColumnFileNames.add(self.file)
            secondColumnArrowMap[self.file] = m.Arrow(
                stroke_width=3, color=self.fontColor
            )
        else:
            firstColumnFileNames.add(self.file)
            firstColumnArrowMap[self.file] = m.Arrow(
                stroke_width=3, color=self.fontColor
            )
        thirdColumnFileNames.add(self.file)

    def rename_moved_file(self):
        """Swap the drawn filename in the third column for the new name."""
        for file in self.thirdColumnFiles:
            new_file = m.Text(
                self.trim_path(self.new_file),
                font=self.font,
                font_size=24,
                color=self.fontColor,
            )
            new_file.move_to(file.get_center())
            if settings.animate:
                self.play(m.FadeOut(file), run_time=1 / settings.speed)
                self.toFadeOut.remove(file)
                self.play(m.AddTextLetterByLetter(new_file))
                self.toFadeOut.add(new_file)
            else:
                self.remove(file)
                self.add(new_file)
def mv(
    file: str = typer.Argument(
        default=None,
        help="The name of the file to change the name/path of",
    ),
    new_file: str = typer.Argument(
        default=None,
        help="The new name/path of the file",
    ),
):
    """CLI entry point: simulate `git mv <file> <new_file>`."""
    from git_sim.mv import Mv

    settings.hide_first_tag = True
    handle_animations(scene=Mv(file=file, new_file=new_file))
156,300 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
def handle_animations(scene: Scene) -> None:
class Pull(GitSimBaseCommand):
def __init__(self, remote: str = None, branch: str = None):
def construct(self):
def populate_zones(
self,
firstColumnFileNames,
secondColumnFileNames,
thirdColumnFileNames,
firstColumnArrowMap={},
secondColumnArrowMap={},
thirdColumnArrowMap={},
):
def pull(
    remote: str = typer.Argument(
        default=None,
        help="The name of the remote to pull from",
    ),
    branch: str = typer.Argument(
        default=None,
        help="The name of the branch to pull",
    ),
):
    """CLI entry point: simulate `git pull [remote] [branch]`."""
    from git_sim.pull import Pull

    handle_animations(scene=Pull(remote=remote, branch=branch))
156,301 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* inside the configured font context.

    The real renderer is imported lazily so the CLI starts fast.
    """
    from git_sim.animations import handle_animations as render_scene

    with settings.font_context:
        return render_scene(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process the resulting media.

    Steps: render with manim; optionally transcode the .mp4 to .webm
    with ffmpeg; in static (non-animated) mode grab the first frame and
    write it out as an image (optionally with an alpha channel for a
    transparent background); finally report the output path and/or
    auto-open it, honoring the stdout/output_only_path/quiet settings.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Swap the "mp4" suffix for "webm" (the dot is preserved by [:-3]).
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static output: read the first frame of the rendered video.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # Name the image after the CLI subcommand that invoked us
            # (two frames up the call stack) plus a timestamp.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then threshold its grayscale to
                # synthesize an alpha channel (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # Stream the raw JPEG bytes to stdout for piping.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Push(GitSimBaseCommand):
    """Simulate `git push` without touching the real remote.

    Clones the local repo and a bare copy of the remote into temp dirs,
    pushes between the clones, and visualizes the result — including
    the two rejection cases (fetch-first and non-fast-forward).
    """

    def __init__(self, remote: str = None, branch: str = None):
        """Record the push target; exits if *remote* does not exist."""
        super().__init__()
        self.remote = remote
        self.branch = branch
        settings.max_branches_per_commit = 2
        if self.remote and self.remote not in self.repo.remotes:
            print("git-sim error: no remote with name '" + self.remote + "'")
            sys.exit(1)

    def construct(self):
        """Perform the sandboxed push and build the scene."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.remote if self.remote else ''} {self.branch if self.branch else ''}"
            )
        self.show_intro()
        # Configure paths to make local clone to run networked commands in
        git_root = self.repo.git.rev_parse("--show-toplevel")
        repo_name = os.path.basename(self.repo.working_dir)
        new_dir = os.path.join(tempfile.gettempdir(), "git_sim", repo_name)
        new_dir2 = os.path.join(tempfile.gettempdir(), "git_sim", repo_name + "2")
        # Save remotes
        orig_remotes = self.repo.remotes
        # Create local clone of local repo
        self.repo = git.Repo.clone_from(git_root, new_dir, no_hardlinks=True)
        # Resolve the URL of the requested remote (or default to the first).
        if self.remote:
            for r in orig_remotes:
                if self.remote == r.name:
                    remote_url = r.url
                    break
        else:
            remote_url = orig_remotes[0].url
        # Create local clone of remote repo to simulate push to so we don't touch the real remote
        self.remote_repo = git.Repo.clone_from(
            remote_url, new_dir2, no_hardlinks=True, bare=True
        )
        # Reset local clone remote to the local clone of remote repo
        if self.remote:
            for r in self.repo.remotes:
                if self.remote == r.name:
                    r.set_url(new_dir2)
        else:
            self.repo.remotes[0].set_url(new_dir2)
        # Push the local clone into the local clone of the remote repo
        push_result = 0
        self.orig_repo = None
        try:
            self.repo.git.push(self.remote, self.branch)
        # If push fails...
        except git.GitCommandError as e:
            if "rejected" in e.stderr and ("fetch first" in e.stderr):
                # Remote has commits that don't exist locally; visualize
                # from the remote's perspective.
                push_result = 1
                self.orig_repo = self.repo
                self.repo = self.remote_repo
                settings.color_by = ColorByOptions.NOTLOCAL1
            elif "rejected" in e.stderr and ("non-fast-forward" in e.stderr):
                # Local branch tip is behind the remote.
                push_result = 2
                self.orig_repo = self.repo
                self.repo = self.remote_repo
                settings.color_by = ColorByOptions.NOTLOCAL2
            else:
                print(f"git-sim error: git push failed: {e.stderr}")
                return
        head_commit = self.get_commit()
        if push_result > 0:
            self.parse_commits(
                head_commit,
                make_branches_remote=(
                    self.remote if self.remote else self.repo.remotes[0].name
                ),
            )
        else:
            self.parse_commits(head_commit)
        self.recenter_frame()
        self.scale_frame()
        self.failed_push(push_result)
        self.color_by()
        self.fadeout()
        self.show_outro()
        # Unlink the program from the filesystem
        self.repo.git.clear_cache()
        if self.orig_repo:
            self.orig_repo.git.clear_cache()
        # Delete the local clones
        shutil.rmtree(new_dir, onerror=self.del_rw)
        shutil.rmtree(new_dir2, onerror=self.del_rw)

    def failed_push(self, push_result):
        """Overlay explanatory text for a rejected push.

        push_result: 0 = success (no-op), 1 = fetch-first rejection,
        2 = non-fast-forward rejection.
        """
        texts = []
        if push_result == 1:
            text1 = m.Text(
                f"'git push' failed since the remote repo has commits that don't exist locally.",
                font=self.font,
                font_size=20,
                color=self.fontColor,
                weight=m.BOLD,
            )
            text1.move_to([self.camera.frame.get_center()[0], 5, 0])
            text2 = m.Text(
                f"Run 'git pull' (or 'git-sim pull' to simulate first) and then try again.",
                font=self.font,
                font_size=20,
                color=self.fontColor,
                weight=m.BOLD,
            )
            text2.move_to(text1.get_center()).shift(m.DOWN / 2)
            text3 = m.Text(
                f"Gold commits exist in remote repo, but not locally (need to be pulled).",
                font=self.font,
                font_size=20,
                color=m.GOLD,
                weight=m.BOLD,
            )
            text3.move_to(text2.get_center()).shift(m.DOWN / 2)
            text4 = m.Text(
                f"Red commits exist in both local and remote repos.",
                font=self.font,
                font_size=20,
                color=m.RED,
                weight=m.BOLD,
            )
            text4.move_to(text3.get_center()).shift(m.DOWN / 2)
            texts = [text1, text2, text3, text4]
        elif push_result == 2:
            text1 = m.Text(
                f"'git push' failed since the tip of your current branch is behind the remote.",
                font=self.font,
                font_size=20,
                color=self.fontColor,
                weight=m.BOLD,
            )
            text1.move_to([self.camera.frame.get_center()[0], 5, 0])
            text2 = m.Text(
                f"Run 'git pull' (or 'git-sim pull' to simulate first) and then try again.",
                font=self.font,
                font_size=20,
                color=self.fontColor,
                weight=m.BOLD,
            )
            text2.move_to(text1.get_center()).shift(m.DOWN / 2)
            text3 = m.Text(
                f"Gold commits are ahead of your current branch tip (need to be pulled).",
                font=self.font,
                font_size=20,
                color=m.GOLD,
                weight=m.BOLD,
            )
            text3.move_to(text2.get_center()).shift(m.DOWN / 2)
            text4 = m.Text(
                f"Red commits are up to date in both local and remote branches.",
                font=self.font,
                font_size=20,
                color=m.RED,
                weight=m.BOLD,
            )
            text4.move_to(text3.get_center()).shift(m.DOWN / 2)
            texts = [text1, text2, text3, text4]
        self.toFadeOut.add(*texts)
        self.recenter_frame()
        self.scale_frame()
        if settings.animate:
            self.play(*[m.AddTextLetterByLetter(t) for t in texts])
        else:
            self.add(*texts)
def push(
    remote: str = typer.Argument(
        default=None,
        help="The name of the remote to push to",
    ),
    branch: str = typer.Argument(
        default=None,
        help="The name of the branch to push",
    ),
):
    """CLI entry point: simulate `git push [remote] [branch]`."""
    from git_sim.push import Push

    handle_animations(scene=Push(remote=remote, branch=branch))
156,302 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* inside the configured font context.

    The real renderer is imported lazily so the CLI starts fast.
    """
    from git_sim.animations import handle_animations as render_scene

    with settings.font_context:
        return render_scene(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process the resulting media.

    Steps: render with manim; optionally transcode the .mp4 to .webm
    with ffmpeg; in static (non-animated) mode grab the first frame and
    write it out as an image (optionally with an alpha channel for a
    transparent background); finally report the output path and/or
    auto-open it, honoring the stdout/output_only_path/quiet settings.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Swap the "mp4" suffix for "webm" (the dot is preserved by [:-3]).
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static output: read the first frame of the rendered video.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # Name the image after the CLI subcommand that invoked us
            # (two frames up the call stack) plus a timestamp.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then threshold its grayscale to
                # synthesize an alpha channel (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # Stream the raw JPEG bytes to stdout for piping.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Rebase(GitSimBaseCommand):
    """Simulate `git rebase <branch>`: replay the commits unique to HEAD
    on top of *branch*, drawing each replayed commit with a new sha."""

    def __init__(self, branch: str):
        """Validate *branch*; exits with status 1 on an unknown ref."""
        super().__init__()
        self.branch = branch
        try:
            git.repo.fun.rev_parse(self.repo, self.branch)
        except git.exc.BadName:
            print(
                "git-sim error: '"
                + self.branch
                + "' is not a valid Git ref or identifier."
            )
            sys.exit(1)
        if self.branch in [branch.name for branch in self.repo.heads]:
            self.selected_branches.append(self.branch)
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: no active branch name.
            pass

    def construct(self):
        """Build the rebase scene."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.branch}"
            )
        # Reject degenerate rebases in both directions.
        if self.branch in self.repo.git.branch(
            "--contains", self.repo.active_branch.name
        ):
            print(
                "git-sim error: Branch '"
                + self.repo.active_branch.name
                + "' is already included in the history of active branch '"
                + self.branch
                + "'."
            )
            sys.exit(1)
        if self.repo.active_branch.name in self.repo.git.branch(
            "--contains", self.branch
        ):
            print(
                "git-sim error: Branch '"
                + self.branch
                + "' is already based on active branch '"
                + self.repo.active_branch.name
                + "'."
            )
            sys.exit(1)
        self.show_intro()
        branch_commit = self.get_commit(self.branch)
        self.parse_commits(branch_commit)
        head_commit = self.get_commit()
        # Whether the merge base is among the drawn default commits;
        # if not, the first replayed commit is labeled "...".
        reached_base = False
        for commit in self.get_default_commits():
            if commit != "dark" and self.branch in self.repo.git.branch(
                "--contains", commit
            ):
                reached_base = True
        self.parse_commits(head_commit, shift=4 * m.DOWN)
        self.parse_all()
        self.center_frame_on_commit(branch_commit)
        # Collect the commits unique to HEAD (up to self.n of them).
        to_rebase = []
        i = 0
        current = head_commit
        while self.branch not in self.repo.git.branch("--contains", current):
            to_rebase.append(current)
            i += 1
            if i >= self.n:
                break
            current = self.get_default_commits()[i]
        # Replay them oldest-first on top of the target branch tip.
        parent = branch_commit.hexsha
        for j, tr in enumerate(reversed(to_rebase)):
            if not reached_base and j == 0:
                message = "..."
            else:
                message = tr.message
            parent = self.setup_and_draw_parent(parent, message)
            self.draw_arrow_between_commits(tr.hexsha, parent)
        self.recenter_frame()
        self.scale_frame()
        self.reset_head_branch(parent)
        self.color_by(offset=2 * len(to_rebase))
        self.fadeout()
        self.show_outro()

    def setup_and_draw_parent(
        self,
        child,
        commitMessage="New commit",
        shift=numpy.array([0.0, 0.0, 0.0]),
        draw_arrow=True,
    ):
        """Draw a simulated commit next to *child* and return its fake sha.

        The fake sha is derived by incrementing each hex digit of the
        child's sha prefix, keeping it a valid 6-char hex string.
        """
        circle = m.Circle(
            stroke_color=m.RED,
            stroke_width=self.commit_stroke_width,
            fill_color=m.RED,
            fill_opacity=0.25,
        )
        circle.height = 1
        circle.next_to(
            self.drawnCommits[child],
            m.LEFT if settings.reverse else m.RIGHT,
            buff=1.5,
        )
        circle.shift(shift)
        start = circle.get_center()
        end = self.drawnCommits[child].get_center()
        arrow = m.Arrow(
            start,
            end,
            color=self.fontColor,
            stroke_width=self.arrow_stroke_width,
            tip_shape=self.arrow_tip_shape,
            max_stroke_width_to_length_ratio=1000,
        )
        # Shorten the arrow so it doesn't overlap either circle.
        length = numpy.linalg.norm(start - end) - (1.5 if start[1] == end[1] else 3)
        arrow.set_length(length)
        sha = "".join(
            chr(ord(letter) + 1)
            if (
                (chr(ord(letter) + 1).isalpha() and letter < "f")
                or chr(ord(letter) + 1).isdigit()
            )
            else letter
            for letter in child[:6]
        )
        commitId = m.Text(
            sha if commitMessage != "..." else "...",
            font=self.font,
            font_size=20,
            color=self.fontColor,
        ).next_to(circle, m.UP)
        self.toFadeOut.add(commitId)
        commitMessage = commitMessage[:40].replace("\n", " ")
        # Wrap the message into 20-char lines, capped at 100 chars total.
        message = m.Text(
            "\n".join(
                commitMessage[j : j + 20] for j in range(0, len(commitMessage), 20)
            )[:100],
            font=self.font,
            font_size=14,
            color=self.fontColor,
        ).next_to(circle, m.DOWN)
        self.toFadeOut.add(message)
        if settings.animate:
            self.play(
                self.camera.frame.animate.move_to(circle.get_center()),
                m.Create(circle),
                m.AddTextLetterByLetter(commitId),
                m.AddTextLetterByLetter(message),
                run_time=1 / settings.speed,
            )
        else:
            self.camera.frame.move_to(circle.get_center())
            self.add(circle, commitId, message)
        self.drawnCommits[sha] = circle
        self.toFadeOut.add(circle)
        if draw_arrow:
            if settings.animate:
                self.play(m.Create(arrow), run_time=1 / settings.speed)
            else:
                self.add(arrow)
            self.toFadeOut.add(arrow)
        return sha
def rebase(
    branch: str = typer.Argument(
        ...,
        help="The branch to simulate rebasing the checked-out commit onto",
    )
):
    """CLI entry point: simulate `git rebase <branch>`."""
    from git_sim.rebase import Rebase

    handle_animations(scene=Rebase(branch=branch))
156,303 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* inside the configured font context.

    The real renderer is imported lazily so the CLI starts fast.
    """
    from git_sim.animations import handle_animations as render_scene

    with settings.font_context:
        return render_scene(scene)
class ResetMode(Enum):
    # Modes accepted by `git reset`. DEFAULT shares the value "mixed"
    # (git's default mode), so MIXED becomes an Enum alias of DEFAULT.
    DEFAULT = "mixed"
    SOFT = "soft"
    MIXED = "mixed"
    HARD = "hard"
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene* and post-process the resulting media.

    Steps: render with manim; optionally transcode the .mp4 to .webm
    with ffmpeg; in static (non-animated) mode grab the first frame and
    write it out as an image (optionally with an alpha channel for a
    transparent background); finally report the output path and/or
    auto-open it, honoring the stdout/output_only_path/quiet settings.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Swap the "mp4" suffix for "webm" (the dot is preserved by [:-3]).
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static output: read the first frame of the rendered video.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime(
                "%m-%d-%y_%H-%M-%S"
            )
            # Name the image after the CLI subcommand that invoked us
            # (two frames up the call stack) plus a timestamp.
            image_file_name = (
                "git-sim-"
                + inspect.stack()[2].function
                + "_"
                + t
                + "."
                + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then threshold its grayscale to
                # synthesize an alpha channel (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # Stream the raw JPEG bytes to stdout for piping.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Reset(GitSimBaseCommand):
    """Simulate `git reset [--soft|--mixed|--hard] <commit>`.

    Visualizes moving HEAD/branch back to *commit* and shows which zone
    (working directory, staging area, or committed changes) the changes
    from the undone commits land in for the chosen mode.
    """

    def __init__(
        self, commit: str, mode: ResetMode, soft: bool, mixed: bool, hard: bool
    ):
        """Resolve *commit* and pick the effective mode.

        The boolean shortcuts override *mode*; soft has the highest
        precedence (applied last). Exits if *commit* doesn't resolve.
        """
        super().__init__()
        self.commit = commit
        self.mode = mode
        settings.hide_merged_branches = True
        try:
            self.resetTo = git.repo.fun.rev_parse(self.repo, self.commit)
        except git.exc.BadName:
            print(
                f"git-sim error: '{self.commit}' is not a valid Git ref or identifier."
            )
            sys.exit(1)
        # Commits that will be "undone" by the reset.
        self.commitsSinceResetTo = list(self.repo.iter_commits(self.commit + "...HEAD"))
        self.n = self.n_default
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: no active branch name.
            pass
        if hard:
            self.mode = ResetMode.HARD
        if mixed:
            self.mode = ResetMode.MIXED
        if soft:
            self.mode = ResetMode.SOFT

    def construct(self):
        """Build the reset scene."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()}{' --' + self.mode.value if self.mode != ResetMode.DEFAULT else ''} {self.commit}",
            )
        self.show_intro()
        self.parse_commits()
        self.recenter_frame()
        self.scale_frame()
        self.reset_head_branch(self.resetTo.hexsha)
        self.vsplit_frame()
        self.setup_and_draw_zones(first_column_name="Changes deleted from")
        self.fadeout()
        self.show_outro()

    def build_commit_id_and_message(self, commit, i):
        """Return (id_text, message, commit, hide_refs) for slot *i*.

        When the reset target is outside the default drawn commits, slot
        3 becomes a "..." placeholder and slot 4 shows the target itself.
        """
        hide_refs = False
        if commit == "dark":
            commitId = m.Text("", font=self.font, font_size=20, color=self.fontColor)
            commitMessage = ""
        elif i == 3 and self.resetTo.hexsha not in [
            c.hexsha for c in self.get_default_commits()
        ]:
            commitId = m.Text(
                "...", font=self.font, font_size=20, color=self.fontColor
            )
            commitMessage = "..."
            hide_refs = True
        elif i == 4 and self.resetTo.hexsha not in [
            c.hexsha for c in self.get_default_commits()
        ]:
            commitId = m.Text(
                self.resetTo.hexsha[:6],
                font=self.font,
                font_size=20,
                color=self.fontColor,
            )
            commitMessage = self.resetTo.message.split("\n")[0][:40].replace("\n", " ")
            commit = self.resetTo
            hide_refs = True
        else:
            commitId = m.Text(
                commit.hexsha[:6],
                font=self.font,
                font_size=20,
                color=self.fontColor,
            )
            commitMessage = commit.message.split("\n")[0][:40].replace("\n", " ")
        if (
            commit != "dark"
            and commit.hexsha == self.resetTo.hexsha
            and commit.hexsha != self.repo.head.commit.hexsha
        ):
            hide_refs = True
        return commitId, commitMessage, commit, hide_refs

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """Sort touched files into zones based on the reset mode.

        soft -> third column; mixed/default -> second; hard -> first.
        Covers files from the undone commits plus unstaged and staged
        local changes (ignoring git-sim's own media output).
        """
        for commit in self.commitsSinceResetTo:
            if commit.hexsha == self.resetTo.hexsha:
                break
            for filename in commit.stats.files:
                if self.mode == ResetMode.SOFT:
                    thirdColumnFileNames.add(filename)
                elif self.mode in (ResetMode.MIXED, ResetMode.DEFAULT):
                    secondColumnFileNames.add(filename)
                elif self.mode == ResetMode.HARD:
                    firstColumnFileNames.add(filename)
        for x in self.repo.index.diff(None):
            if "git-sim_media" not in x.a_path:
                if self.mode == ResetMode.SOFT:
                    secondColumnFileNames.add(x.a_path)
                elif self.mode in (ResetMode.MIXED, ResetMode.DEFAULT):
                    secondColumnFileNames.add(x.a_path)
                elif self.mode == ResetMode.HARD:
                    firstColumnFileNames.add(x.a_path)
        for y in self.repo.index.diff("HEAD"):
            if "git-sim_media" not in y.a_path:
                if self.mode == ResetMode.SOFT:
                    thirdColumnFileNames.add(y.a_path)
                elif self.mode in (ResetMode.MIXED, ResetMode.DEFAULT):
                    secondColumnFileNames.add(y.a_path)
                elif self.mode == ResetMode.HARD:
                    firstColumnFileNames.add(y.a_path)
def reset(
    commit: str = typer.Argument(
        default="HEAD",
        help="The ref (branch/tag), or commit ID to simulate reset to",
    ),
    mode: ResetMode = typer.Option(
        default="mixed",
        help="Either mixed, soft, or hard",
    ),
    soft: bool = typer.Option(
        default=False,
        help="Simulate a soft reset, shortcut for --mode=soft",
    ),
    mixed: bool = typer.Option(
        default=False,
        help="Simulate a mixed reset, shortcut for --mode=mixed",
    ),
    hard: bool = typer.Option(
        # Fix: help text previously said "soft reset" for the --hard shortcut.
        default=False,
        help="Simulate a hard reset, shortcut for --mode=hard",
    ),
):
    """Typer entry point for `git-sim reset`.

    Builds a Reset scene for the requested target and mode, then renders it.
    """
    # Imported lazily so the CLI starts fast when other subcommands run.
    from git_sim.reset import Reset

    settings.hide_first_tag = True
    scene = Reset(commit=commit, mode=mode, soft=soft, mixed=mixed, hard=hard)
    handle_animations(scene=scene)
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* with the configured font active.

    The animation backend is imported lazily (it is heavy), and the actual
    rendering is delegated to its handle_animations implementation inside
    the settings.font_context manager.
    """
    from git_sim.animations import handle_animations as _handle_animations

    with settings.font_context:
        return _handle_animations(scene)
# Module-level singleton: global configuration shared by every subcommand.
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene*, post-process the output, report its location, and
    optionally auto-open it.

    Post-processing covers: optional mp4 -> webm transcode via ffmpeg, and in
    static (non-animated) mode extracting the first video frame to an image,
    optionally with a transparency mask applied.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Assumes the rendered file ends in a 3-char extension (".mp4").
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static mode: grab the first frame of the rendered video as an image.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-%y_%H-%M-%S")
            # Name the image after the CLI subcommand (two frames up the stack).
            image_file_name = (
                "git-sim-" + inspect.stack()[2].function + "_" + t + "." + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then derive an alpha channel by
                # thresholding the grayscale image (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # NOTE(review): if the frame read failed, `image` is invalid here
            # -- confirm a readable frame is always produced upstream.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # NOTE(review): image_file_path is unbound if the frame read
                # failed above -- would raise NameError; confirm.
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Restore(GitSimBaseCommand):
    """Simulate `git restore`: animate staged/modified files moving back
    toward the working tree in the three-zone view."""

    def __init__(self, files: List[str]):
        super().__init__()
        self.files = files
        settings.hide_merged_branches = True
        self.n = self.n_default
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: there is no active branch to highlight.
            pass
        # Reject any file that is neither modified (unstaged) nor staged.
        for file in self.files:
            if file not in [x.a_path for x in self.repo.index.diff(None)] + [
                y.a_path for y in self.repo.index.diff("HEAD")
            ]:
                print(f"git-sim error: No modified or staged file with name: '{file}'")
                sys.exit()

    def construct(self):
        """Build the full animation; zones are drawn reversed so arrows flow
        back toward the working tree."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {' '.join(self.files)}"
            )
        self.show_intro()
        self.parse_commits()
        self.recenter_frame()
        self.scale_frame()
        self.vsplit_frame()
        self.setup_and_draw_zones(reverse=True)
        self.fadeout()
        self.show_outro()

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """Fill zone columns and add arrows for the restored files.

        NOTE(review): mutable default dicts are shared across calls --
        confirm callers always pass fresh maps.
        """
        # Unstaged modifications; restored ones also appear in column three
        # with an arrow drawn from column two.
        for x in self.repo.index.diff(None):
            if "git-sim_media" not in x.a_path:
                secondColumnFileNames.add(x.a_path)
                for file in self.files:
                    if file == x.a_path:
                        thirdColumnFileNames.add(x.a_path)
                        secondColumnArrowMap[x.a_path] = m.Arrow(
                            stroke_width=3, color=self.fontColor
                        )
        # Staged changes; restored ones also appear in column two with an
        # arrow drawn from column one.
        for y in self.repo.index.diff("HEAD"):
            if "git-sim_media" not in y.a_path:
                firstColumnFileNames.add(y.a_path)
                for file in self.files:
                    if file == y.a_path:
                        secondColumnFileNames.add(y.a_path)
                        firstColumnArrowMap[y.a_path] = m.Arrow(
                            stroke_width=3, color=self.fontColor
                        )
def restore(
    files: List[str] = typer.Argument(
        default=None,
        help="The names of one or more files to restore",
    )
):
    """Typer entry point for `git-sim restore`."""
    # Imported lazily so the CLI starts fast when other subcommands run.
    from git_sim.restore import Restore

    settings.hide_first_tag = True
    scene = Restore(files=files)
    handle_animations(scene=scene)
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* with the configured font active.

    The animation backend is imported lazily (it is heavy), and the actual
    rendering is delegated to its handle_animations implementation inside
    the settings.font_context manager.
    """
    from git_sim.animations import handle_animations as _handle_animations

    with settings.font_context:
        return _handle_animations(scene)
# Module-level singleton: global configuration shared by every subcommand.
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene*, post-process the output, report its location, and
    optionally auto-open it.

    Post-processing covers: optional mp4 -> webm transcode via ffmpeg, and in
    static (non-animated) mode extracting the first video frame to an image,
    optionally with a transparency mask applied.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Assumes the rendered file ends in a 3-char extension (".mp4").
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static mode: grab the first frame of the rendered video as an image.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-%y_%H-%M-%S")
            # Name the image after the CLI subcommand (two frames up the stack).
            image_file_name = (
                "git-sim-" + inspect.stack()[2].function + "_" + t + "." + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then derive an alpha channel by
                # thresholding the grayscale image (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # NOTE(review): if the frame read failed, `image` is invalid here
            # -- confirm a readable frame is always produced upstream.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # NOTE(review): image_file_path is unbound if the frame read
                # failed above -- would raise NameError; confirm.
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Revert(GitSimBaseCommand):
    """Simulate `git revert`: draw the history, a new red revert commit
    pointing at the reverted commit, and the zone view listing the files
    whose changes would be reverted."""

    def __init__(self, commit: str):
        super().__init__()
        self.commit = commit
        try:
            # Resolve the user-supplied ref/ID to a commit object.
            self.revert = git.repo.fun.rev_parse(self.repo, self.commit)
        except git.exc.BadName:
            print(
                "git-sim error: '"
                + self.commit
                + "' is not a valid Git ref or identifier."
            )
            sys.exit(1)
        self.n_default = 4
        self.n = self.n_default
        settings.hide_merged_branches = True
        self.zone_title_offset += 0.1
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: there is no active branch to highlight.
            pass

    def construct(self):
        """Build the full revert animation."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.commit}"
            )
        self.show_intro()
        self.parse_commits()
        self.center_frame_on_commit(self.get_commit())
        self.setup_and_draw_revert_commit()
        self.recenter_frame()
        self.scale_frame()
        # Point HEAD/branch at the simulated new commit id "abcdef".
        self.reset_head_branch("abcdef")
        self.vsplit_frame()
        self.setup_and_draw_zones(
            first_column_name="----",
            second_column_name="Changes reverted from",
            third_column_name="----",
        )
        self.fadeout()
        self.show_outro()

    def build_commit_id_and_message(self, commit, i):
        """Return (id text, message, commit, hide_refs) for slot *i*.

        When the reverted commit is not among the default commits, slot 2
        shows a "..." spacer and slot 3 shows the reverted commit itself.
        """
        hide_refs = False
        if commit == "dark":
            commitId = m.Text("", font=self.font, font_size=20, color=self.fontColor)
            commitMessage = ""
        elif i == 2 and self.revert.hexsha not in [
            commit.hexsha for commit in self.get_default_commits()
        ]:
            commitId = m.Text(
                "...", font=self.font, font_size=20, color=self.fontColor
            )
            commitMessage = "..."
            hide_refs = True
        elif i == 3 and self.revert.hexsha not in [
            commit.hexsha for commit in self.get_default_commits()
        ]:
            commitId = m.Text(
                self.revert.hexsha[:6],
                font=self.font,
                font_size=20,
                color=self.fontColor,
            )
            commitMessage = self.revert.message.split("\n")[0][:40].replace("\n", " ")
            hide_refs = True
        else:
            commitId = m.Text(
                commit.hexsha[:6],
                font=self.font,
                font_size=20,
                color=self.fontColor,
            )
            commitMessage = commit.message.split("\n")[0][:40].replace("\n", " ")
        return commitId, commitMessage, commit, hide_refs

    def setup_and_draw_revert_commit(self):
        """Draw the simulated revert commit (red circle, id "abcdef") next to
        HEAD, with an arrow back to the current commit."""
        circle = m.Circle(
            stroke_color=m.RED,
            stroke_width=self.commit_stroke_width,
            fill_color=m.RED,
            fill_opacity=0.25,
        )
        circle.height = 1
        circle.next_to(
            self.drawnCommits[self.get_commit().hexsha],
            m.LEFT if settings.reverse else m.RIGHT,
            buff=1.5,
        )
        start = circle.get_center()
        end = self.drawnCommits[self.get_commit().hexsha].get_center()
        arrow = m.Arrow(
            start,
            end,
            color=self.fontColor,
            stroke_width=self.arrow_stroke_width,
            tip_shape=self.arrow_tip_shape,
            max_stroke_width_to_length_ratio=1000,
        )
        # Shorten the arrow so it does not overlap either circle.
        length = numpy.linalg.norm(start - end) - (1.5 if start[1] == end[1] else 3)
        arrow.set_length(length)
        commitId = m.Text(
            "abcdef", font=self.font, font_size=20, color=self.fontColor
        ).next_to(circle, m.UP)
        self.toFadeOut.add(commitId)
        commitMessage = "Revert " + self.revert.hexsha[0:6]
        commitMessage = commitMessage[:40].replace("\n", " ")
        # Wrap the message at 20 characters per line, capped at 100 chars.
        message = m.Text(
            "\n".join(
                commitMessage[j : j + 20] for j in range(0, len(commitMessage), 20)
            )[:100],
            font=self.font,
            font_size=14,
            color=self.fontColor,
        ).next_to(circle, m.DOWN)
        self.toFadeOut.add(message)
        if settings.animate:
            self.play(
                self.camera.frame.animate.move_to(circle.get_center()),
                m.Create(circle),
                m.AddTextLetterByLetter(commitId),
                m.AddTextLetterByLetter(message),
                run_time=1 / settings.speed,
            )
        else:
            self.camera.frame.move_to(circle.get_center())
            self.add(circle, commitId, message)
        self.drawnCommits["abcdef"] = circle
        self.toFadeOut.add(circle)
        if settings.animate:
            self.play(m.Create(arrow), run_time=1 / settings.speed)
        else:
            self.add(arrow)
        self.toFadeOut.add(arrow)

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """List the reverted commit's files in the middle column.

        NOTE(review): mutable default dicts are shared across calls --
        confirm callers always pass fresh maps.
        """
        for filename in self.revert.stats.files:
            secondColumnFileNames.add(filename)
def revert(
    commit: str = typer.Argument(
        default="HEAD",
        help="The ref (branch/tag), or commit ID to simulate revert",
    )
):
    """Typer entry point for `git-sim revert`."""
    # Imported lazily so the CLI starts fast when other subcommands run.
    from git_sim.revert import Revert

    settings.hide_first_tag = True
    scene = Revert(commit=commit)
    handle_animations(scene=scene)
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* with the configured font active.

    The animation backend is imported lazily (it is heavy), and the actual
    rendering is delegated to its handle_animations implementation inside
    the settings.font_context manager.
    """
    from git_sim.animations import handle_animations as _handle_animations

    with settings.font_context:
        return _handle_animations(scene)
# Module-level singleton: global configuration shared by every subcommand.
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene*, post-process the output, report its location, and
    optionally auto-open it.

    Post-processing covers: optional mp4 -> webm transcode via ffmpeg, and in
    static (non-animated) mode extracting the first video frame to an image,
    optionally with a transparency mask applied.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Assumes the rendered file ends in a 3-char extension (".mp4").
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static mode: grab the first frame of the rendered video as an image.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-%y_%H-%M-%S")
            # Name the image after the CLI subcommand (two frames up the stack).
            image_file_name = (
                "git-sim-" + inspect.stack()[2].function + "_" + t + "." + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then derive an alpha channel by
                # thresholding the grayscale image (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # NOTE(review): if the frame read failed, `image` is invalid here
            # -- confirm a readable frame is always produced upstream.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # NOTE(review): image_file_path is unbound if the frame read
                # failed above -- would raise NameError; confirm.
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Rm(GitSimBaseCommand):
    """Simulate `git rm`: show tracked files moving from the working
    directory / staging area into a struck-through "Removed files" column."""

    def __init__(self, files: List[str]):
        super().__init__()
        self.hide_first_tag = True
        self.allow_no_commits = True
        self.files = files
        settings.hide_merged_branches = True
        self.n = self.n_default
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: there is no active branch to highlight.
            pass
        # Each file must be tracked; ls-files --error-unmatch raises otherwise.
        for file in self.files:
            try:
                self.repo.git.ls_files("--error-unmatch", file)
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # narrowing to git.exc.GitCommandError would be safer -- confirm.
            except:
                print(f"git-sim error: No tracked file with name: '{file}'")
                sys.exit()

    def construct(self):
        """Build the full rm animation."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING} {type(self).__name__.lower()} {' '.join(self.files)}"
            )
        self.show_intro()
        self.parse_commits()
        self.recenter_frame()
        self.scale_frame()
        self.vsplit_frame()
        self.setup_and_draw_zones(
            first_column_name="Working directory",
            second_column_name="Staging area",
            third_column_name="Removed files",
        )
        self.fadeout()
        self.show_outro()

    def create_zone_text(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnFiles,
        secondColumnFiles,
        thirdColumnFiles,
        firstColumnFilesDict,
        secondColumnFilesDict,
        thirdColumnFilesDict,
        firstColumnTitle,
        secondColumnTitle,
        thirdColumnTitle,
        horizontal2,
    ):
        """Lay out file-name labels under each column title; the third
        (removed) column is rendered struck through."""
        for i, f in enumerate(firstColumnFileNames):
            text = (
                m.Text(
                    self.trim_path(f),
                    font=self.font,
                    font_size=24,
                    color=self.fontColor,
                )
                .move_to(
                    (firstColumnTitle.get_center()[0], horizontal2.get_center()[1], 0)
                )
                .shift(m.DOWN * 0.5 * (i + 1))
            )
            firstColumnFiles.add(text)
            firstColumnFilesDict[f] = text
        for j, f in enumerate(secondColumnFileNames):
            text = (
                m.Text(
                    self.trim_path(f),
                    font=self.font,
                    font_size=24,
                    color=self.fontColor,
                )
                .move_to(
                    (secondColumnTitle.get_center()[0], horizontal2.get_center()[1], 0)
                )
                .shift(m.DOWN * 0.5 * (j + 1))
            )
            secondColumnFiles.add(text)
            secondColumnFilesDict[f] = text
        for h, f in enumerate(thirdColumnFileNames):
            # Removed files are drawn with a strikethrough via Pango markup.
            text = (
                m.MarkupText(
                    "<span strikethrough='true' strikethrough_color='"
                    + self.fontColor
                    + "'>"
                    + self.trim_path(f)
                    + "</span>",
                    font=self.font,
                    font_size=24,
                    color=self.fontColor,
                )
                .move_to(
                    (thirdColumnTitle.get_center()[0], horizontal2.get_center()[1], 0)
                )
                .shift(m.DOWN * 0.5 * (h + 1))
            )
            thirdColumnFiles.add(text)
            thirdColumnFilesDict[f] = text

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """Place each removed file in its origin column (staged -> second,
        otherwise first), arrow it out, and list it in the removed column.

        NOTE(review): mutable default dicts are shared across calls --
        confirm callers always pass fresh maps.
        """
        for file in self.files:
            if file in [x.a_path for x in self.repo.index.diff("HEAD")]:
                secondColumnFileNames.add(file)
                secondColumnArrowMap[file] = m.Arrow(
                    stroke_width=3, color=self.fontColor
                )
            else:
                firstColumnFileNames.add(file)
                firstColumnArrowMap[file] = m.Arrow(
                    stroke_width=3, color=self.fontColor
                )
            thirdColumnFileNames.add(file)
def rm(
    files: List[str] = typer.Argument(
        default=None,
        help="The names of one or more files to remove from Git's index",
    )
):
    """Typer entry point for `git-sim rm`."""
    # Imported lazily so the CLI starts fast when other subcommands run.
    from git_sim.rm import Rm

    settings.hide_first_tag = True
    scene = Rm(files=files)
    handle_animations(scene=scene)
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* with the configured font active.

    The animation backend is imported lazily (it is heavy), and the actual
    rendering is delegated to its handle_animations implementation inside
    the settings.font_context manager.
    """
    from git_sim.animations import handle_animations as _handle_animations

    with settings.font_context:
        return _handle_animations(scene)
class StashSubCommand(Enum):
    """Stash subcommands that git-sim can simulate."""

    POP = "pop"
    APPLY = "apply"
    PUSH = "push"
# Module-level singleton: global configuration shared by every subcommand.
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene*, post-process the output, report its location, and
    optionally auto-open it.

    Post-processing covers: optional mp4 -> webm transcode via ffmpeg, and in
    static (non-animated) mode extracting the first video frame to an image,
    optionally with a transparency mask applied.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Assumes the rendered file ends in a 3-char extension (".mp4").
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static mode: grab the first frame of the rendered video as an image.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-%y_%H-%M-%S")
            # Name the image after the CLI subcommand (two frames up the stack).
            image_file_name = (
                "git-sim-" + inspect.stack()[2].function + "_" + t + "." + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then derive an alpha channel by
                # thresholding the grayscale image (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # NOTE(review): if the frame read failed, `image` is invalid here
            # -- confirm a readable frame is always produced upstream.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # NOTE(review): image_file_path is unbound if the frame read
                # failed above -- would raise NameError; confirm.
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Stash(GitSimBaseCommand):
    """Simulate `git stash` (push/pop/apply): animate files moving between
    the working directory, staging area, and "Stashed changes" columns."""

    def __init__(self, files: List[str], command: StashSubCommand):
        super().__init__()
        self.files = files
        # Remember whether the user supplied any files (used when printing).
        self.no_files = True if not self.files else False
        self.command = command
        settings.hide_merged_branches = True
        self.n = self.n_default
        try:
            self.selected_branches.append(self.repo.active_branch.name)
        except TypeError:
            # Detached HEAD: there is no active branch to highlight.
            pass
        # push (or no subcommand, which defaults to push semantics):
        # validate the file list, or default it to everything modified/staged.
        if self.command in [StashSubCommand.PUSH, None]:
            for file in self.files:
                if file not in [x.a_path for x in self.repo.index.diff(None)] + [
                    y.a_path for y in self.repo.index.diff("HEAD")
                ]:
                    print(
                        f"git-sim error: No modified or staged file with name: '{file}'"
                    )
                    sys.exit()
            if not self.files:
                self.files = [x.a_path for x in self.repo.index.diff(None)] + [
                    y.a_path for y in self.repo.index.diff("HEAD")
                ]
        elif self.files:
            # pop/apply ignore any file arguments.
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print(
                    "Files are not required in apply/pop subcommand. Ignoring the file list....."
                )

    def construct(self):
        """Build the full stash animation."""
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print(
                f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.command.value if self.command else ''} {' '.join(self.files) if not self.no_files else ''}"
            )
        self.show_intro()
        self.parse_commits()
        self.recenter_frame()
        self.scale_frame()
        self.vsplit_frame()
        self.setup_and_draw_zones(
            first_column_name="Working directory",
            second_column_name="Staging area",
            third_column_name="Stashed changes",
        )
        self.fadeout()
        self.show_outro()

    def create_zone_text(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnFiles,
        secondColumnFiles,
        thirdColumnFiles,
        firstColumnFilesDict,
        secondColumnFilesDict,
        thirdColumnFilesDict,
        firstColumnTitle,
        secondColumnTitle,
        thirdColumnTitle,
        horizontal2,
    ):
        """Lay out file-name labels under each column title; stash pop draws
        the third (stash) column struck through."""
        for i, f in enumerate(firstColumnFileNames):
            text = (
                m.Text(
                    self.trim_path(f),
                    font=self.font,
                    font_size=24,
                    color=self.fontColor,
                )
                .move_to(
                    (firstColumnTitle.get_center()[0], horizontal2.get_center()[1], 0)
                )
                .shift(m.DOWN * 0.5 * (i + 1))
            )
            firstColumnFiles.add(text)
            firstColumnFilesDict[f] = text
        for j, f in enumerate(secondColumnFileNames):
            text = (
                m.Text(
                    self.trim_path(f),
                    font=self.font,
                    font_size=24,
                    color=self.fontColor,
                )
                .move_to(
                    (secondColumnTitle.get_center()[0], horizontal2.get_center()[1], 0)
                )
                .shift(m.DOWN * 0.5 * (j + 1))
            )
            secondColumnFiles.add(text)
            secondColumnFilesDict[f] = text
        for h, f in enumerate(thirdColumnFileNames):
            # Pop removes the stash entry, so strike its files through.
            text = (
                m.MarkupText(
                    "<span strikethrough='true' strikethrough_color='"
                    + self.fontColor
                    + "'>"
                    + self.trim_path(f)
                    + "</span>"
                    if self.command == StashSubCommand.POP
                    else self.trim_path(f),
                    font=self.font,
                    font_size=24,
                    color=self.fontColor,
                )
                .move_to(
                    (thirdColumnTitle.get_center()[0], horizontal2.get_center()[1], 0)
                )
                .shift(m.DOWN * 0.5 * (h + 1))
            )
            thirdColumnFiles.add(text)
            thirdColumnFilesDict[f] = text

    def populate_zones(
        self,
        firstColumnFileNames,
        secondColumnFileNames,
        thirdColumnFileNames,
        firstColumnArrowMap={},
        secondColumnArrowMap={},
        thirdColumnArrowMap={},
    ):
        """Fill the columns and arrows for push vs pop/apply directions.

        NOTE(review): mutable default dicts are shared across calls --
        confirm callers always pass fresh maps.
        """
        if self.command in [StashSubCommand.POP, StashSubCommand.APPLY]:
            # pop/apply: stashed entries arrow back toward the working tree.
            for x in self.repo.index.diff(None):
                thirdColumnFileNames.add(x.a_path)
                firstColumnFileNames.add(x.a_path)
                thirdColumnArrowMap[x.a_path] = m.Arrow(
                    stroke_width=3, color=self.fontColor
                )
            for y in self.repo.index.diff("HEAD"):
                firstColumnFileNames.add(y.a_path)
                thirdColumnFileNames.add(y.a_path)
                thirdColumnArrowMap[y.a_path] = m.Arrow(
                    stroke_width=3, color=self.fontColor
                )
        else:
            # push: selected files arrow from their zone into the stash.
            for x in self.repo.index.diff(None):
                firstColumnFileNames.add(x.a_path)
                for file in self.files:
                    if file == x.a_path:
                        thirdColumnFileNames.add(x.a_path)
                        firstColumnArrowMap[x.a_path] = m.Arrow(
                            stroke_width=3, color=self.fontColor
                        )
            for y in self.repo.index.diff("HEAD"):
                secondColumnFileNames.add(y.a_path)
                for file in self.files:
                    if file == y.a_path:
                        thirdColumnFileNames.add(y.a_path)
                        secondColumnArrowMap[y.a_path] = m.Arrow(
                            stroke_width=3, color=self.fontColor
                        )
def stash(
    command: StashSubCommand = typer.Argument(
        default=None,
        help="Stash subcommand (push, pop, apply)",
    ),
    files: List[str] = typer.Argument(
        default=None,
        help="The name of the file to stash changes for",
    ),
):
    """Typer entry point for `git-sim stash`."""
    # Imported lazily so the CLI starts fast when other subcommands run.
    from git_sim.stash import Stash

    settings.hide_first_tag = True
    scene = Stash(files=files, command=command)
    handle_animations(scene=scene)
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* with the configured font active.

    The animation backend is imported lazily (it is heavy), and the actual
    rendering is delegated to its handle_animations implementation inside
    the settings.font_context manager.
    """
    from git_sim.animations import handle_animations as _handle_animations

    with settings.font_context:
        return _handle_animations(scene)
# Module-level singleton: global configuration shared by every subcommand.
settings = Settings()
def handle_animations(scene: Scene) -> None:
    """Render *scene*, post-process the output, report its location, and
    optionally auto-open it.

    Post-processing covers: optional mp4 -> webm transcode via ffmpeg, and in
    static (non-animated) mode extracting the first video frame to an image,
    optionally with a transparency mask applied.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Assumes the rendered file ends in a 3-char extension (".mp4").
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static mode: grab the first frame of the rendered video as an image.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-%y_%H-%M-%S")
            # Name the image after the CLI subcommand (two frames up the stack).
            image_file_name = (
                "git-sim-" + inspect.stack()[2].function + "_" + t + "." + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then derive an alpha channel by
                # thresholding the grayscale image (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # NOTE(review): if the frame read failed, `image` is invalid here
            # -- confirm a readable frame is always produced upstream.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # NOTE(review): image_file_path is unbound if the frame read
                # failed above -- would raise NameError; confirm.
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Status(GitSimBaseCommand):
    """Simulate `git status`: draw the commit history, then the split
    three-zone view of the repository state."""

    def __init__(self):
        super().__init__()
        # Merged branches are visual noise for a status rendering.
        settings.hide_merged_branches = True
        self.n = self.n_default
        try:
            active_branch_name = self.repo.active_branch.name
        except TypeError:
            # Detached HEAD -- there is no active branch to highlight.
            pass
        else:
            self.selected_branches.append(active_branch_name)

    def construct(self):
        """Assemble the status animation from intro to outro."""
        suppress_info = (
            settings.stdout or settings.output_only_path or settings.quiet
        )
        if not suppress_info:
            print(f"{settings.INFO_STRING } {type(self).__name__.lower()}")
        self.show_intro()
        self.parse_commits()
        self.recenter_frame()
        self.scale_frame()
        self.vsplit_frame()
        self.setup_and_draw_zones()
        self.fadeout()
        self.show_outro()
def status():
    """Typer entry point for `git-sim status`."""
    # Imported lazily so the CLI starts fast when other subcommands run.
    from git_sim.status import Status

    settings.hide_first_tag = True
    # NOTE(review): flag name suggests this permits running in a repo with
    # no commits yet -- confirm against GitSimBaseCommand.
    settings.allow_no_commits = True
    scene = Status()
    handle_animations(scene=scene)
from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
    """Render *scene* with the configured font active.

    The animation backend is imported lazily (it is heavy), and the actual
    rendering is delegated to its handle_animations implementation inside
    the settings.font_context manager.
    """
    from git_sim.animations import handle_animations as _handle_animations

    with settings.font_context:
        return _handle_animations(scene)
def handle_animations(scene: Scene) -> None:
    """Render *scene*, post-process the output, report its location, and
    optionally auto-open it.

    Post-processing covers: optional mp4 -> webm transcode via ffmpeg, and in
    static (non-animated) mode extracting the first video frame to an image,
    optionally with a transparency mask applied.
    """
    scene.render()
    if settings.video_format == VideoFormat.WEBM:
        # Assumes the rendered file ends in a 3-char extension (".mp4").
        webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
        cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
        print("Converting video output to .webm format...")
        # Start ffmpeg conversion
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
        # if the conversion is successful, delete the .mp4
        if os.path.exists(webm_file_path):
            os.remove(scene.renderer.file_writer.movie_file_path)
            scene.renderer.file_writer.movie_file_path = webm_file_path
    if not settings.animate:
        # Static mode: grab the first frame of the rendered video as an image.
        video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
        success, image = video.read()
        if success:
            t = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-%y_%H-%M-%S")
            # Name the image after the CLI subcommand (two frames up the stack).
            image_file_name = (
                "git-sim-" + inspect.stack()[2].function + "_" + t + "." + settings.img_format
            )
            image_file_path = os.path.join(
                os.path.join(settings.media_dir, "images"), image_file_name
            )
            if settings.transparent_bg:
                # Unsharp-mask the frame, then derive an alpha channel by
                # thresholding the grayscale image (inverted for light mode).
                unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
                image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
                tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                if settings.light_mode:
                    _, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
                else:
                    _, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
                b, g, r = cv2.split(image)
                rgba = [b, g, r, alpha]
                image = cv2.merge(rgba, 4)
            cv2.imwrite(image_file_path, image)
            if (
                not settings.stdout
                and not settings.output_only_path
                and not settings.quiet
            ):
                print("Output image location:", image_file_path)
            elif (
                not settings.stdout and settings.output_only_path and not settings.quiet
            ):
                print(image_file_path)
        if settings.stdout and not settings.quiet:
            # NOTE(review): if the frame read failed, `image` is invalid here
            # -- confirm a readable frame is always produced upstream.
            sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
    else:
        if not settings.stdout and not settings.output_only_path and not settings.quiet:
            print("Output video location:", scene.renderer.file_writer.movie_file_path)
        elif not settings.stdout and settings.output_only_path and not settings.quiet:
            print(scene.renderer.file_writer.movie_file_path)
    if settings.auto_open and not settings.stdout:
        try:
            if not settings.animate:
                # NOTE(review): image_file_path is unbound if the frame read
                # failed above -- would raise NameError; confirm.
                open_file(image_file_path)
            else:
                open_file(scene.renderer.file_writer.movie_file_path)
        except FileNotFoundError:
            print(
                "Error automatically opening media, please manually open the image or video file to view."
            )
class Switch(GitSimBaseCommand):
def __init__(self, branch: str, c: bool):
super().__init__()
self.branch = branch
self.c = c
if self.c:
if self.branch in self.repo.heads:
print(
"git-sim error: can't create new branch '"
+ self.branch
+ "', it already exists"
)
sys.exit(1)
else:
try:
git.repo.fun.rev_parse(self.repo, self.branch)
except git.exc.BadName:
print(
"git-sim error: '"
+ self.branch
+ "' is not a valid Git ref or identifier."
)
sys.exit(1)
if self.branch == self.repo.active_branch.name:
print("git-sim error: already on branch '" + self.branch + "'")
sys.exit(1)
self.is_ancestor = False
self.is_descendant = False
# branch being switched to is behind HEAD
if self.repo.active_branch.name in self.repo.git.branch(
"--contains", self.branch
):
self.is_ancestor = True
# HEAD is behind branch being switched to
elif self.branch in self.repo.git.branch(
"--contains", self.repo.active_branch.name
):
self.is_descendant = True
if self.branch in [branch.name for branch in self.repo.heads]:
self.selected_branches.append(self.branch)
try:
self.selected_branches.append(self.repo.active_branch.name)
except TypeError:
pass
def construct(self):
if not settings.stdout and not settings.output_only_path and not settings.quiet:
print(
f"{settings.INFO_STRING } {type(self).__name__.lower()}{' -c' if self.c else ''} {self.branch}"
)
self.show_intro()
head_commit = self.get_commit()
# using -c flag, create new branch label and exit
if self.c:
self.parse_commits(head_commit)
self.recenter_frame()
self.scale_frame()
self.draw_ref(head_commit, self.topref, text=self.branch, color=m.GREEN)
else:
branch_commit = self.get_commit(self.branch)
if self.is_ancestor:
commits_in_range = list(self.repo.iter_commits(self.branch + "..HEAD"))
# branch is reached from HEAD, so draw everything
if len(commits_in_range) <= self.n:
self.parse_commits(head_commit)
reset_head_to = branch_commit.hexsha
self.recenter_frame()
self.scale_frame()
self.reset_head(reset_head_to)
self.reset_branch(head_commit.hexsha)
# branch is not reached, so start from branch
else:
self.parse_commits(branch_commit)
self.draw_ref(branch_commit, self.topref)
self.recenter_frame()
self.scale_frame()
elif self.is_descendant:
self.parse_commits(branch_commit)
reset_head_to = branch_commit.hexsha
self.recenter_frame()
self.scale_frame()
if "HEAD" in self.drawnRefs:
self.reset_head(reset_head_to)
self.reset_branch(head_commit.hexsha)
else:
self.draw_ref(branch_commit, self.topref)
else:
self.parse_commits(head_commit)
self.parse_commits(branch_commit, shift=4 * m.DOWN)
self.center_frame_on_commit(branch_commit)
self.recenter_frame()
self.scale_frame()
self.reset_head(branch_commit.hexsha)
self.reset_branch(head_commit.hexsha)
self.color_by()
self.fadeout()
self.show_outro()
def switch(
branch: str = typer.Argument(
...,
help="The name of the branch to switch to",
),
c: bool = typer.Option(
False,
"-c",
help="Create the specified branch if it doesn't already exist",
),
):
from git_sim.switch import Switch
scene = Switch(branch=branch, c=c)
handle_animations(scene=scene) | null |
156,310 | from __future__ import annotations
import typer
from typing import List, TYPE_CHECKING
from git_sim.enums import ResetMode, StashSubCommand
from git_sim.settings import settings
def handle_animations(scene: Scene) -> None:
from git_sim.animations import handle_animations as _handle_animations
with settings.font_context:
return _handle_animations(scene)
def handle_animations(scene: Scene) -> None:
scene.render()
if settings.video_format == VideoFormat.WEBM:
webm_file_path = str(scene.renderer.file_writer.movie_file_path)[:-3] + "webm"
cmd = f"ffmpeg -y -i {scene.renderer.file_writer.movie_file_path} -hide_banner -loglevel error -c:v libvpx-vp9 -crf 50 -b:v 0 -b:a 128k -c:a libopus {webm_file_path}"
print("Converting video output to .webm format...")
# Start ffmpeg conversion
p = subprocess.Popen(cmd, shell=True)
p.wait()
# if the conversion is successful, delete the .mp4
if os.path.exists(webm_file_path):
os.remove(scene.renderer.file_writer.movie_file_path)
scene.renderer.file_writer.movie_file_path = webm_file_path
if not settings.animate:
video = cv2.VideoCapture(str(scene.renderer.file_writer.movie_file_path))
success, image = video.read()
if success:
t = datetime.datetime.fromtimestamp(time.time()).strftime(
"%m-%d-%y_%H-%M-%S"
)
image_file_name = (
"git-sim-"
+ inspect.stack()[2].function
+ "_"
+ t
+ "."
+ settings.img_format
)
image_file_path = os.path.join(
os.path.join(settings.media_dir, "images"), image_file_name
)
if settings.transparent_bg:
unsharp_image = cv2.GaussianBlur(image, (0, 0), 3)
image = cv2.addWeighted(image, 1.5, unsharp_image, -0.5, 0)
tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if settings.light_mode:
_, alpha = cv2.threshold(tmp, 225, 255, cv2.THRESH_BINARY_INV)
else:
_, alpha = cv2.threshold(tmp, 25, 255, cv2.THRESH_BINARY)
b, g, r = cv2.split(image)
rgba = [b, g, r, alpha]
image = cv2.merge(rgba, 4)
cv2.imwrite(image_file_path, image)
if (
not settings.stdout
and not settings.output_only_path
and not settings.quiet
):
print("Output image location:", image_file_path)
elif (
not settings.stdout and settings.output_only_path and not settings.quiet
):
print(image_file_path)
if settings.stdout and not settings.quiet:
sys.stdout.buffer.write(cv2.imencode(".jpg", image)[1].tobytes())
else:
if not settings.stdout and not settings.output_only_path and not settings.quiet:
print("Output video location:", scene.renderer.file_writer.movie_file_path)
elif not settings.stdout and settings.output_only_path and not settings.quiet:
print(scene.renderer.file_writer.movie_file_path)
if settings.auto_open and not settings.stdout:
try:
if not settings.animate:
open_file(image_file_path)
else:
open_file(scene.renderer.file_writer.movie_file_path)
except FileNotFoundError:
print(
"Error automatically opening media, please manually open the image or video file to view."
)
class Tag(GitSimBaseCommand):
def __init__(self, name: str):
super().__init__()
self.name = name
def construct(self):
if not settings.stdout and not settings.output_only_path and not settings.quiet:
print(f"{settings.INFO_STRING } {type(self).__name__.lower()} {self.name}")
self.show_intro()
self.parse_commits()
self.parse_all()
self.center_frame_on_commit(self.get_commit())
tagText = m.Text(
self.name,
font=self.font,
font_size=20,
color=self.fontColor,
)
tagRec = m.Rectangle(
color=m.YELLOW,
fill_color=m.YELLOW,
fill_opacity=0.25,
height=0.4,
width=tagText.width + 0.25,
)
tagRec.next_to(self.topref, m.UP)
tagText.move_to(tagRec.get_center())
fulltag = m.VGroup(tagRec, tagText)
if settings.animate:
self.play(m.Create(fulltag), run_time=1 / settings.speed)
else:
self.add(fulltag)
self.toFadeOut.add(tagRec, tagText)
self.drawnRefs[self.name] = fulltag
self.recenter_frame()
self.scale_frame()
self.color_by()
self.fadeout()
self.show_outro()
def tag(
name: str = typer.Argument(
...,
help="The name of the new tag",
)
):
from git_sim.tag import Tag
scene = Tag(name=name)
handle_animations(scene=scene) | null |
156,311 | import contextlib
import datetime
import os
import pathlib
import sys
import time
from pathlib import Path
import typer
import manim as m
from fontTools.ttLib import TTFont
import git_sim.commands
from git_sim.settings import (
ColorByOptions,
StyleOptions,
ImgFormat,
VideoFormat,
settings,
)
The provided code snippet includes necessary dependencies for implementing the `get_font_name` function. Write a Python function `def get_font_name(font_path)` to solve the following problem:
Get the name of a font from its .ttf file.
Here is the function:
def get_font_name(font_path):
"""Get the name of a font from its .ttf file."""
font = TTFont(font_path)
return font["name"].getName(4, 3, 1, 1033).toUnicode() | Get the name of a font from its .ttf file. |
156,312 | import contextlib
import datetime
import os
import pathlib
import sys
import time
from pathlib import Path
import typer
import manim as m
from fontTools.ttLib import TTFont
import git_sim.commands
from git_sim.settings import (
ColorByOptions,
StyleOptions,
ImgFormat,
VideoFormat,
settings,
)
def version_callback(value: bool) -> None:
if value:
print(f"git-sim version {git_sim.__version__}")
raise typer.Exit() | null |
156,313 | import asyncio
import logging
import mimetypes
import os
from pathlib import Path
from api_analytics.fastapi import Analytics
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi_simple_cachecontrol.middleware import CacheControlMiddleware
from fastapi_simple_cachecontrol.types import CacheControl
from huggingface_hub.hf_api import LocalTokenNotFoundError
from api import websocket_manager
from api.routes import static, ws
from api.websockets.data import Data
from api.websockets.notification import Notification
from core import shared
from core.files import get_full_model_path
from core.types import InferenceBackend
from core.utils import determine_model_type
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `log_request` function. Write a Python function `async def log_request(request: Request)` to solve the following problem:
Log all requests
Here is the function:
async def log_request(request: Request):
"Log all requests"
logger.debug(
f"url: {request.url}"
# f"url: {request.url}, params: {request.query_params}, body: {await request.body()}"
) | Log all requests |
156,314 | import asyncio
import logging
import mimetypes
import os
from pathlib import Path
from api_analytics.fastapi import Analytics
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi_simple_cachecontrol.middleware import CacheControlMiddleware
from fastapi_simple_cachecontrol.types import CacheControl
from huggingface_hub.hf_api import LocalTokenNotFoundError
from api import websocket_manager
from api.routes import static, ws
from api.websockets.data import Data
from api.websockets.notification import Notification
from core import shared
from core.files import get_full_model_path
from core.types import InferenceBackend
from core.utils import determine_model_type
logger = logging.getLogger(__name__)
key = os.getenv("FASTAPI_ANALYTICS_KEY")
if key:
app.add_middleware(Analytics, api_key=key)
logger.info("Enabled FastAPI Analytics")
else:
logger.debug("No FastAPI Analytics key provided, skipping")
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
class Notification(Data):
"Notification to be sent to the client"
def __init__(
self,
severity: Literal["info", "warning", "error", "success"] = "info",
title: str = "",
message: str = "",
timeout: int = 10_000,
) -> None:
self.severity = severity
self.title = title
self.message = message
self.timeout = timeout
super().__init__(
data={
"severity": self.severity,
"title": self.title,
"message": self.message,
"timeout": self.timeout,
},
data_type="notification",
)
class Configuration(DataClassJsonMixin):
"Main configuration class for the application"
txt2img: Txt2ImgConfig = field(default_factory=Txt2ImgConfig)
img2img: Img2ImgConfig = field(default_factory=Img2ImgConfig)
inpainting: InpaintingConfig = field(default_factory=InpaintingConfig)
controlnet: ControlNetConfig = field(default_factory=ControlNetConfig)
upscale: UpscaleConfig = field(default_factory=UpscaleConfig)
api: APIConfig = field(default_factory=APIConfig)
interrogator: InterrogatorConfig = field(default_factory=InterrogatorConfig)
aitemplate: AITemplateConfig = field(default_factory=AITemplateConfig)
onnx: ONNXConfig = field(default_factory=ONNXConfig)
bot: BotConfig = field(default_factory=BotConfig)
frontend: FrontendConfig = field(default_factory=FrontendConfig)
sampler_config: SamplerConfig = field(default_factory=SamplerConfig)
flags: FlagsConfig = field(default_factory=FlagsConfig)
extra: CatchAll = field(default_factory=dict)
config = load_config()
The provided code snippet includes necessary dependencies for implementing the `validation_exception_handler` function. Write a Python function `async def validation_exception_handler(_request: Request, exc: RequestValidationError)` to solve the following problem:
Output validation errors into debug log for debugging purposes
Here is the function:
async def validation_exception_handler(_request: Request, exc: RequestValidationError):
"Output validation errors into debug log for debugging purposes"
logger.debug(exc)
if exc._error_cache is not None and exc._error_cache[0]["loc"][0] == "body":
from core.config._config import Configuration
default_value = Configuration()
keys = [str(i) for i in exc._error_cache[0]["loc"][1:]] # type: ignore
current_value = exc._error_cache[0]["ctx"]["given"] # type: ignore
# Traverse the config object to find the correct value
for key in keys:
default_value = getattr(default_value, key)
websocket_manager.broadcast_sync(
data=Data(
data={
"default_value": default_value,
"key": keys,
"current_value": current_value,
},
data_type="incorrect_settings_value",
)
)
try:
websocket_manager.broadcast_sync(
data=Notification(
severity="error",
message="Validation error",
title="Validation Error",
)
)
except IndexError:
logger.debug("Unable to parse validation error, skipping the error broadcast")
content = {
"status_code": 10422,
"message": f"{exc}".replace("\n", " ").replace(" ", " "),
"data": None,
}
return JSONResponse(
content=content, status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
) | Output validation errors into debug log for debugging purposes |
156,315 | import asyncio
import logging
import mimetypes
import os
from pathlib import Path
from api_analytics.fastapi import Analytics
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi_simple_cachecontrol.middleware import CacheControlMiddleware
from fastapi_simple_cachecontrol.types import CacheControl
from huggingface_hub.hf_api import LocalTokenNotFoundError
from api import websocket_manager
from api.routes import static, ws
from api.websockets.data import Data
from api.websockets.notification import Notification
from core import shared
from core.files import get_full_model_path
from core.types import InferenceBackend
from core.utils import determine_model_type
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
async def hf_token_error(_request, _exc):
await websocket_manager.broadcast(
data=Data(
data_type="token",
data={"huggingface": "missing"},
)
)
return JSONResponse(
content={
"status_code": 10422,
"message": "HuggingFace token not found",
"data": None,
},
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
) | null |
156,316 | import asyncio
import logging
import mimetypes
import os
from pathlib import Path
from api_analytics.fastapi import Analytics
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi_simple_cachecontrol.middleware import CacheControlMiddleware
from fastapi_simple_cachecontrol.types import CacheControl
from huggingface_hub.hf_api import LocalTokenNotFoundError
from api import websocket_manager
from api.routes import static, ws
from api.websockets.data import Data
from api.websockets.notification import Notification
from core import shared
from core.files import get_full_model_path
from core.types import InferenceBackend
from core.utils import determine_model_type
The provided code snippet includes necessary dependencies for implementing the `custom_http_exception_handler` function. Write a Python function `async def custom_http_exception_handler(request: Request, _exc)` to solve the following problem:
Redirect back to the main page (frontend will handle it)
Here is the function:
async def custom_http_exception_handler(request: Request, _exc):
"Redirect back to the main page (frontend will handle it)"
if request.url.path.startswith("/api"):
return JSONResponse(
content={
"status_code": 10404,
"message": "Not Found",
"data": None,
},
status_code=status.HTTP_404_NOT_FOUND,
)
return FileResponse("frontend/dist/index.html") | Redirect back to the main page (frontend will handle it) |
156,317 | import asyncio
import logging
import mimetypes
import os
from pathlib import Path
from api_analytics.fastapi import Analytics
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi_simple_cachecontrol.middleware import CacheControlMiddleware
from fastapi_simple_cachecontrol.types import CacheControl
from huggingface_hub.hf_api import LocalTokenNotFoundError
from api import websocket_manager
from api.routes import static, ws
from api.websockets.data import Data
from api.websockets.notification import Notification
from core import shared
from core.files import get_full_model_path
from core.types import InferenceBackend
from core.utils import determine_model_type
logger = logging.getLogger(__name__)
websocket_manager = WebSocketManager()
def get_full_model_path(
repo_id: str,
revision: str = "main",
model_folder: str = "models",
force: bool = False,
diffusers_skip_ref_follow: bool = False,
) -> Path:
"Return the path to the actual model"
# Replace -- with / and remove the __dim part
repo_id = repo_id.replace("--", "/").split("__")[0]
repo_path = Path(repo_id)
# 1. Check for the exact path
if repo_path.exists():
logger.debug(f"Found model in {repo_path}")
return repo_path
# 2. Check if model is stored in local storage
alt_path = Path("data") / model_folder / repo_id
if alt_path.exists() or force or alt_path.is_symlink():
logger.debug(f"Found model in {alt_path}")
return alt_path
logger.debug(
f"Model not found in {repo_path} or {alt_path}, checking diffusers cache..."
)
# 3. Check if model is stored in diffusers cache
storage = diffusers_storage_name(repo_id)
ref = current_diffusers_ref(storage, revision)
if not ref:
raise ValueError(f"No ref found for {repo_id}")
if diffusers_skip_ref_follow:
return Path(storage)
return Path(storage) / "snapshots" / ref
InferenceBackend = Literal["PyTorch", "AITemplate", "SDXL", "ONNX"]
def determine_model_type(
file: Path,
) -> Tuple[str, PyTorchModelBase, PyTorchModelStage]:
name = file.name
model_type: PyTorchModelBase = "Unknown"
model_stage: PyTorchModelStage = "last_stage"
if file.suffix == ".safetensors":
with open(file, "rb") as f:
length = struct.unpack("<Q", f.read(8))[0]
_metadata: Dict[str, Dict[str, str]] = json.loads(f.read(length))
keys: Dict[str, str] = _metadata.get("__metadata__", {})
if "format" in keys:
# Model is A1111-style
merge_recipe: str = keys.get("sd_merge_recipe", None) # type: ignore
if merge_recipe is not None:
merge_recipe_json: dict = json.loads(merge_recipe)
og = name
name = merge_recipe_json.get("custom_name", None)
if name is None:
name = og
else:
name = f"{name} ({og})"
if (
"conditioner.embedders.0.transformer.text_model.encoder.layers.3.layer_norm1.bias"
in _metadata
):
model_type = "SDXL"
elif (
"conditioner.embedders.0.model.transformer.resblocks.0.attn.in_proj_weight"
in _metadata
):
model_stage = "last_stage"
model_type = "SDXL"
elif (
"cond_stage_model.transformer.text_model.encoder.layers.0.layer_norm1.weight"
in _metadata
):
model_type = "SD2.x"
elif (
"encoder.block.20.layer.1.DenseReluDense.wo.weight" in _metadata
or "encoder.block.0.layer.0.SelfAttention.k.SCB" in _metadata
):
model_type = "IF"
model_stage = "text_encoding"
elif "add_embedding.norm1.weight" in _metadata:
model_type = "IF"
if "class_embedding.linear_1.bias" not in _metadata:
model_stage = "first_stage"
elif file.is_dir():
if file.joinpath("model_index.json").exists():
with open(file / "model_index.json", "r") as f:
metadata: Dict[str, str] = json.loads(f.read())
class_name = metadata.get("_class_name")
if class_name == "KandinskyV22PriorPipeline":
model_type = "Kandinsky 2.2"
model_stage = "text_encoding"
elif (
class_name == "KandinskyV22ControlnetPipeline"
or class_name == "KandinskyV22Pipeline"
):
model_type = "Kandinsky 2.2"
elif class_name == "KandinskyPipeline":
model_type = "Kandinsky 2.1"
elif class_name == "KandinskyPriorPipeline":
model_type = "Kandinsky 2.1"
model_stage = "text_encoding"
elif class_name == "StableDiffusionPipeline":
# Either SD1.x or SD2.x
model_type = "SD1.x"
elif class_name == "StableDiffusionXLPipeline":
model_type = "SDXL"
else:
model_type = "Unknown"
return (name, model_type, model_stage)
config = load_config()
cached_model_list = CachedModelList()
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `startup_event` function. Write a Python function `async def startup_event()` to solve the following problem:
Prepare the event loop for other asynchronous tasks
Here is the function:
async def startup_event():
"Prepare the event loop for other asynchronous tasks"
if logger.level > logging.DEBUG:
from transformers import logging as transformers_logging
transformers_logging.set_verbosity_error()
shared.asyncio_loop = asyncio.get_event_loop()
websocket_manager.loop = shared.asyncio_loop
perf_task = asyncio.create_task(websocket_manager.perf_loop())
shared.asyncio_tasks.append(perf_task)
from core.config import config
if config.api.autoloaded_models:
from core.shared_dependent import cached_model_list, gpu
all_models = cached_model_list.all()
for model in config.api.autoloaded_models:
if model in [i.path for i in all_models]:
backend: InferenceBackend = [i.backend for i in all_models if i.path == model][0] # type: ignore
model_type = determine_model_type(get_full_model_path(model))[1]
gpu.load_model(model, backend, type=model_type)
else:
logger.warning(f"Autoloaded model {model} not found, skipping")
logger.info("Started WebSocketManager performance monitoring loop")
logger.info(f"UI Available at: http://localhost:{shared.api_port}/") | Prepare the event loop for other asynchronous tasks |
156,318 | import asyncio
import logging
import mimetypes
import os
from pathlib import Path
from api_analytics.fastapi import Analytics
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi_simple_cachecontrol.middleware import CacheControlMiddleware
from fastapi_simple_cachecontrol.types import CacheControl
from huggingface_hub.hf_api import LocalTokenNotFoundError
from api import websocket_manager
from api.routes import static, ws
from api.websockets.data import Data
from api.websockets.notification import Notification
from core import shared
from core.files import get_full_model_path
from core.types import InferenceBackend
from core.utils import determine_model_type
logger = logging.getLogger(__name__)
websocket_manager = WebSocketManager()
The provided code snippet includes necessary dependencies for implementing the `shutdown_event` function. Write a Python function `async def shutdown_event()` to solve the following problem:
Close all WebSocket connections
Here is the function:
async def shutdown_event():
"Close all WebSocket connections"
logger.info("Closing all WebSocket connections")
await websocket_manager.close_all() | Close all WebSocket connections |
156,319 | import logging
from fastapi import APIRouter
from fastapi.websockets import WebSocket, WebSocketDisconnect
from api import websocket_manager
from api.websockets.data import Data
websocket_manager = WebSocketManager()
The provided code snippet includes necessary dependencies for implementing the `master_endpoint` function. Write a Python function `async def master_endpoint(websocket: WebSocket)` to solve the following problem:
Main connection point for the websocket
Here is the function:
async def master_endpoint(websocket: WebSocket):
"Main connection point for the websocket"
await websocket_manager.connect(websocket)
try:
while True:
text = await websocket.receive_text()
if text == "ping":
await websocket.send_text("pong")
except WebSocketDisconnect:
websocket_manager.disconnect(websocket) | Main connection point for the websocket |
156,320 | import logging
from fastapi import APIRouter
from fastapi.websockets import WebSocket, WebSocketDisconnect
from api import websocket_manager
from api.websockets.data import Data
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
The provided code snippet includes necessary dependencies for implementing the `set_progress` function. Write a Python function `def set_progress(progress: int)` to solve the following problem:
Set the progress of the progress bar
Here is the function:
def set_progress(progress: int):
"Set the progress of the progress bar"
websocket_manager.broadcast_sync(
data=Data(data_type="progress", data={"progress": progress})
) | Set the progress of the progress bar |
156,321 | import logging
from fastapi import APIRouter
from fastapi.websockets import WebSocket, WebSocketDisconnect
from api import websocket_manager
from api.websockets.data import Data
websocket_manager = WebSocketManager()
def get_active_connections():
connections = websocket_manager.get_active_connections()
converted_connections = [
f"{connection.client.host}:{connection.client.port}-{connection.client_state.name}"
for connection in connections
if connection.client is not None
]
return converted_connections | null |
156,322 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
def get_full_model_path(
repo_id: str,
revision: str = "main",
model_folder: str = "models",
force: bool = False,
diffusers_skip_ref_follow: bool = False,
) -> Path:
"Return the path to the actual model"
# Replace -- with / and remove the __dim part
repo_id = repo_id.replace("--", "/").split("__")[0]
repo_path = Path(repo_id)
# 1. Check for the exact path
if repo_path.exists():
logger.debug(f"Found model in {repo_path}")
return repo_path
# 2. Check if model is stored in local storage
alt_path = Path("data") / model_folder / repo_id
if alt_path.exists() or force or alt_path.is_symlink():
logger.debug(f"Found model in {alt_path}")
return alt_path
logger.debug(
f"Model not found in {repo_path} or {alt_path}, checking diffusers cache..."
)
# 3. Check if model is stored in diffusers cache
storage = diffusers_storage_name(repo_id)
ref = current_diffusers_ref(storage, revision)
if not ref:
raise ValueError(f"No ref found for {repo_id}")
if diffusers_skip_ref_follow:
return Path(storage)
return Path(storage) / "snapshots" / ref
gpu = GPU()
class ModelResponse:
"Dataclass for a response containing a loaded model info"
name: str
path: str
backend: Backend
valid: bool
vae: str
state: Literal["loading", "loaded", "not loaded"] = field(default="not loaded")
textual_inversions: List[str] = field(default_factory=list)
type: PyTorchModelBase = "SD1.x"
stage: PyTorchModelStage = "last_stage"
def determine_model_type(
file: Path,
) -> Tuple[str, PyTorchModelBase, PyTorchModelStage]:
name = file.name
model_type: PyTorchModelBase = "Unknown"
model_stage: PyTorchModelStage = "last_stage"
if file.suffix == ".safetensors":
with open(file, "rb") as f:
length = struct.unpack("<Q", f.read(8))[0]
_metadata: Dict[str, Dict[str, str]] = json.loads(f.read(length))
keys: Dict[str, str] = _metadata.get("__metadata__", {})
if "format" in keys:
# Model is A1111-style
merge_recipe: str = keys.get("sd_merge_recipe", None) # type: ignore
if merge_recipe is not None:
merge_recipe_json: dict = json.loads(merge_recipe)
og = name
name = merge_recipe_json.get("custom_name", None)
if name is None:
name = og
else:
name = f"{name} ({og})"
if (
"conditioner.embedders.0.transformer.text_model.encoder.layers.3.layer_norm1.bias"
in _metadata
):
model_type = "SDXL"
elif (
"conditioner.embedders.0.model.transformer.resblocks.0.attn.in_proj_weight"
in _metadata
):
model_stage = "last_stage"
model_type = "SDXL"
elif (
"cond_stage_model.transformer.text_model.encoder.layers.0.layer_norm1.weight"
in _metadata
):
model_type = "SD2.x"
elif (
"encoder.block.20.layer.1.DenseReluDense.wo.weight" in _metadata
or "encoder.block.0.layer.0.SelfAttention.k.SCB" in _metadata
):
model_type = "IF"
model_stage = "text_encoding"
elif "add_embedding.norm1.weight" in _metadata:
model_type = "IF"
if "class_embedding.linear_1.bias" not in _metadata:
model_stage = "first_stage"
elif file.is_dir():
if file.joinpath("model_index.json").exists():
with open(file / "model_index.json", "r") as f:
metadata: Dict[str, str] = json.loads(f.read())
class_name = metadata.get("_class_name")
if class_name == "KandinskyV22PriorPipeline":
model_type = "Kandinsky 2.2"
model_stage = "text_encoding"
elif (
class_name == "KandinskyV22ControlnetPipeline"
or class_name == "KandinskyV22Pipeline"
):
model_type = "Kandinsky 2.2"
elif class_name == "KandinskyPipeline":
model_type = "Kandinsky 2.1"
elif class_name == "KandinskyPriorPipeline":
model_type = "Kandinsky 2.1"
model_stage = "text_encoding"
elif class_name == "StableDiffusionPipeline":
# Either SD1.x or SD2.x
model_type = "SD1.x"
elif class_name == "StableDiffusionXLPipeline":
model_type = "SDXL"
else:
model_type = "Unknown"
return (name, model_type, model_stage)
The provided code snippet includes necessary dependencies for implementing the `list_loaded_models` function. Write a Python function `def list_loaded_models() -> List[ModelResponse]` to solve the following problem:
Returns a list containing information about loaded models
Here is the function:
def list_loaded_models() -> List[ModelResponse]:
    "Returns a list containing information about loaded models"
    responses = []
    for model_id, model in gpu.loaded_models.items():
        name, model_type, model_stage = determine_model_type(
            get_full_model_path(model_id)
        )
        # Checkpoint files report their resolved human-readable name;
        # diffusers-style repos keep the raw model id instead.
        is_checkpoint_file = (".ckpt" in model_id) or (".safetensors" in model_id)
        responses.append(
            ModelResponse(
                name=name if is_checkpoint_file else model_id,
                backend=model.backend,
                path=model.model_id,
                state="loaded",
                vae=model.__dict__.get("vae_path", "default"),
                textual_inversions=model.__dict__.get("textual_inversions", []),
                valid=True,
                stage=model_stage,
                type=model_type,
            )
        )
    return responses
156,323 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
cached_model_list = CachedModelList()
class ModelResponse:
"Dataclass for a response containing a loaded model info"
name: str
path: str
backend: Backend
valid: bool
vae: str
state: Literal["loading", "loaded", "not loaded"] = field(default="not loaded")
textual_inversions: List[str] = field(default_factory=list)
type: PyTorchModelBase = "SD1.x"
stage: PyTorchModelStage = "last_stage"
The provided code snippet includes necessary dependencies for implementing the `list_available_models` function. Write a Python function `def list_available_models() -> List[ModelResponse]` to solve the following problem:
Show a list of available models
Here is the function:
def list_available_models() -> List[ModelResponse]:
    "Show a list of available models"
    # Served from the cached model list rather than re-scanning disk each call
    available = cached_model_list.all()
    return available
156,324 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
logger = logging.getLogger(__name__)
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
gpu = GPU()
InferenceBackend = Literal["PyTorch", "AITemplate", "SDXL", "ONNX"]
PyTorchModelBase = Literal[
"SD1.x",
"SD2.x",
"SDXL",
"Kandinsky 2.1",
"Kandinsky 2.2",
"Wuerstchen",
"IF",
"Unknown",
]
The provided code snippet includes necessary dependencies for implementing the `load_model` function. Write a Python function `def load_model( model: str, backend: InferenceBackend, type: PyTorchModelBase, )` to solve the following problem:
Loads a model into memory
Here is the function:
def load_model(
    model: str,
    backend: InferenceBackend,
    type: PyTorchModelBase,
):
    "Loads a model into memory"
    try:
        gpu.load_model(model, backend, type)
        # Tell connected clients to refresh their model lists
        websocket_manager.broadcast_sync(
            data=Data(data={}, data_type="refresh_models")
        )
    except torch.cuda.OutOfMemoryError:  # type: ignore
        # Log the full traceback, but surface only a concise HTTP error
        logger.warning(traceback.format_exc())
        raise HTTPException(status_code=500, detail="Out of memory")
    return {"message": "Model loaded"}
156,325 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `unload_model` function. Write a Python function `def unload_model(model: str)` to solve the following problem:
Unloads a model from memory
Here is the function:
def unload_model(model: str):
    "Unloads a model from memory"
    gpu.unload(model)
    # Notify connected clients that the loaded-model set changed
    refresh = Data(data_type="refresh_models", data={})
    websocket_manager.broadcast_sync(data=refresh)
    return {"message": "Model unloaded"}
156,326 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `unload_all_models` function. Write a Python function `def unload_all_models()` to solve the following problem:
Unload all models from memory
Here is the function:
def unload_all_models():
    "Unload all models from memory"
    gpu.unload_all()
    # Broadcast a refresh so client model lists reflect the empty state
    refresh = Data(data_type="refresh_models", data={})
    websocket_manager.broadcast_sync(data=refresh)
    return {"message": "All models unloaded"}
156,327 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
gpu = GPU()
class VaeLoadRequest:
"Dataclass for loading a VAE into a model"
model: str
vae: str
The provided code snippet includes necessary dependencies for implementing the `load_vae` function. Write a Python function `def load_vae(req: VaeLoadRequest)` to solve the following problem:
Load a VAE into a model
Here is the function:
def load_vae(req: VaeLoadRequest):
    "Load a VAE into a model"
    gpu.load_vae(req)
    # Clients re-query models so the new VAE shows up in their UI
    refresh = Data(data_type="refresh_models", data={})
    websocket_manager.broadcast_sync(data=refresh)
    return {"message": "VAE model loaded"}
156,328 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
gpu = GPU()
class TextualInversionLoadRequest:
"Dataclass for loading a textual inversion onto a model"
model: str
textual_inversion: str
The provided code snippet includes necessary dependencies for implementing the `load_textual_inversion` function. Write a Python function `def load_textual_inversion(req: TextualInversionLoadRequest)` to solve the following problem:
Load a LoRA model into a model
Here is the function:
def load_textual_inversion(req: TextualInversionLoadRequest):
    """Load a textual inversion (embedding) into a model.

    Note: the docstring and response message previously said "LoRA model" —
    a copy-paste leftover; this endpoint loads textual inversions.
    """
    gpu.load_textual_inversion(req)
    # Clients re-query models so the new embedding shows up in their UI
    websocket_manager.broadcast_sync(data=Data(data_type="refresh_models", data={}))
    return {"message": "Textual inversion loaded"}
156,329 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `cleanup` function. Write a Python function `def cleanup()` to solve the following problem:
Free up memory manually
Here is the function:
def cleanup():
    """Free up memory manually."""
    # Ask the GPU wrapper to run its memory/cache cleanup routine
    gpu.memory_cleanup()
    return {"message": "Memory cleaned up"}
156,330 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `download_model` function. Write a Python function `def download_model(model: str)` to solve the following problem:
Download a model to the cache
Here is the function:
def download_model(model: str):
    "Download a model to the cache"
    gpu.download_huggingface_model(model)
    # Refresh client model lists now that a new model is available
    refresh = Data(data_type="refresh_models", data={})
    websocket_manager.broadcast_sync(data=refresh)
    return {"message": "Model downloaded"}
156,331 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
The provided code snippet includes necessary dependencies for implementing the `get_current_cached_preprocessor` function. Write a Python function `def get_current_cached_preprocessor()` to solve the following problem:
Get the current cached preprocessor
Here is the function:
def get_current_cached_preprocessor():
    """Get the name of the currently cached ControlNet preprocessor.

    Returns ``{"preprocessor": <class name>}`` when one is cached and
    ``{"preprocessor": None}`` otherwise.

    BUG FIX: the original condition was inverted (``if not cached``), so the
    name branch was only reachable when the cache was empty — the function
    always returned ``{"preprocessor": None}``.
    """
    from core import shared_dependent

    cached = shared_dependent.cached_controlnet_preprocessor
    return {"preprocessor": cached.__class__.__name__ if cached else None}
156,332 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
logger = logging.getLogger(__name__)
possible_dirs = [
"models",
"lora",
"textual-inversion",
"lycoris",
"vae",
"aitemplate",
"onnx",
]
class UploadFileTarget(FileTarget):
"A target that writes to a temporary file and then moves it to the target dir"
def __init__(self, dir_: Path, *args, **kwargs):
super().__init__(None, *args, **kwargs) # type: ignore
self.file = UploadFile(
filename=None, file=NamedTemporaryFile(delete=False, dir=dir_) # type: ignore
)
self._fd = self.file.file
self.dir = dir_
def on_start(self):
self.file.filename = self.filename = self.multipart_filename # type: ignore
if self.dir.joinpath(self.filename).exists(): # type: ignore
raise HTTPException(409, "File already exists")
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
The provided code snippet includes necessary dependencies for implementing the `upload_model` function. Write a Python function `async def upload_model(request: Request)` to solve the following problem:
Upload a model file to the server
Here is the function:
async def upload_model(request: Request):
    """Upload a model file to the server.

    Streams the multipart body into a temporary file inside the target
    directory and moves it into place once the upload completes.
    """
    upload_type = request.query_params.get("type")
    assert upload_type in possible_dirs, f"Invalid upload type '{upload_type}'"

    # Fixed typo in the log message ("Recieving" -> "Receiving")
    logger.info(f"Receiving model of type '{upload_type}'")
    upload_dir = Path("data") / upload_type

    parser = StreamingFormDataParser(request.headers)
    target = UploadFileTarget(upload_dir)
    try:
        parser.register("file", target)
        # Feed the request body to the parser chunk by chunk (no full buffering)
        async for chunk in request.stream():
            parser.data_received(chunk)
        if target.filename:
            # Publish the finished temp file under its real name
            shutil.move(target.file.file.name, upload_dir.joinpath(target.filename))
        else:
            raise HTTPException(422, "Could not find file in body")
    finally:
        await target.file.close()
        # Remove the temp file if the move did not happen (error or no filename)
        if os.path.exists(target.file.file.name):
            os.unlink(target.file.file.name)
        await websocket_manager.broadcast(
            data=Data(data_type="refresh_models", data={})
        )
    return {"message": "Model uploaded"}
156,333 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
logger = logging.getLogger(__name__)
possible_dirs = [
"models",
"lora",
"textual-inversion",
"lycoris",
"vae",
"aitemplate",
"onnx",
]
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
class Notification(Data):
"Notification to be sent to the client"
def __init__(
self,
severity: Literal["info", "warning", "error", "success"] = "info",
title: str = "",
message: str = "",
timeout: int = 10_000,
) -> None:
self.severity = severity
self.title = title
self.message = message
self.timeout = timeout
super().__init__(
data={
"severity": self.severity,
"title": self.title,
"message": self.message,
"timeout": self.timeout,
},
data_type="notification",
)
def get_full_model_path(
repo_id: str,
revision: str = "main",
model_folder: str = "models",
force: bool = False,
diffusers_skip_ref_follow: bool = False,
) -> Path:
"Return the path to the actual model"
# Replace -- with / and remove the __dim part
repo_id = repo_id.replace("--", "/").split("__")[0]
repo_path = Path(repo_id)
# 1. Check for the exact path
if repo_path.exists():
logger.debug(f"Found model in {repo_path}")
return repo_path
# 2. Check if model is stored in local storage
alt_path = Path("data") / model_folder / repo_id
if alt_path.exists() or force or alt_path.is_symlink():
logger.debug(f"Found model in {alt_path}")
return alt_path
logger.debug(
f"Model not found in {repo_path} or {alt_path}, checking diffusers cache..."
)
# 3. Check if model is stored in diffusers cache
storage = diffusers_storage_name(repo_id)
ref = current_diffusers_ref(storage, revision)
if not ref:
raise ValueError(f"No ref found for {repo_id}")
if diffusers_skip_ref_follow:
return Path(storage)
return Path(storage) / "snapshots" / ref
class DeleteModelRequest:
"Dataclass for requesting a deletion of a model"
model_path: str
model_type: Literal[
"models", "lora", "textual-inversion", "lycoris", "vae", "aitemplate"
]
The provided code snippet includes necessary dependencies for implementing the `delete_model` function. Write a Python function `def delete_model(req: DeleteModelRequest)` to solve the following problem:
Delete a model from the server
Here is the function:
def delete_model(req: DeleteModelRequest):
    "Delete a model from the server"
    assert req.model_type in possible_dirs, f"Invalid upload type {req.model_type}"

    # Diffusers models live inside the managed cache; everything else is a plain path
    if req.model_type == "models":
        path = get_full_model_path(req.model_path, diffusers_skip_ref_follow=True)
    else:
        path = Path(req.model_path)

    logger.warning(f"Deleting model '{path}' of type '{req.model_type}'")

    # NOTE(review): symlinked models are skipped entirely (never deleted) —
    # confirm this is the intended behavior rather than unlinking the symlink.
    if not path.is_symlink():
        if not path.exists():
            websocket_manager.broadcast_sync(
                data=Notification(
                    severity="error",
                    message="Model not found",
                )
            )
            raise HTTPException(404, "Model not found")
        if path.is_dir():
            shutil.rmtree(path)
        else:
            os.unlink(path)

    websocket_manager.broadcast_sync(data=Data(data_type="refresh_models", data={}))
    return {"message": "Model deleted"}
156,334 | import logging
import os
import shutil
import traceback
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Literal
import torch
from fastapi import APIRouter, HTTPException, Request, UploadFile
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
from api import websocket_manager
from api.websockets.data import Data
from api.websockets.notification import Notification
from core.files import get_full_model_path
from core.shared_dependent import cached_model_list, gpu
from core.types import (
DeleteModelRequest,
InferenceBackend,
ModelResponse,
PyTorchModelBase,
TextualInversionLoadRequest,
VaeLoadRequest,
)
from core.utils import determine_model_type, download_file
websocket_manager = WebSocketManager()
class Data:
"Data to be sent to the client"
def __init__(self, data: Union[Dict[Any, Any], List[Any]], data_type: str):
self.data = data
self.type = data_type
def to_json(self) -> Union[Dict[str, Any], List[Any]]:
"Converts the data to a JSON object"
return {"type": self.type, "data": self.data}
def download_file(url: str, file: Path, add_filename: bool = False):
"""Download a file to the specified path, or to a child of the provided file
with the name provided in the Content-Disposition header"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
with session.get(url, stream=True, timeout=30) as r:
try:
filename_match = content_disposition_regex.search(
r.headers["Content-Disposition"]
)
if filename_match:
file_name = filename_match.group(1)
else:
raise KeyError
except KeyError:
file_name = url.split("/")[-1]
if add_filename:
file = file / file_name
try:
total = int(r.headers["Content-Length"])
except KeyError:
total = None
logger.warning(
"Content-Length header not found, progress bar will not work"
)
if file.exists():
logger.debug(f"File {file.as_posix()} already exists, skipping")
return file
logger.info(f"Downloading {file_name} into {file.as_posix()}")
# AFAIK Windows doesn't like big buffers
s = (64 if os.name == "nt" else 1024) * 1024
with open(file, mode="wb+") as f:
with tqdm(total=total, unit="B", unit_scale=True) as pbar:
for data in r.iter_content(s):
f.write(data)
pbar.update(len(data))
return file
The provided code snippet includes necessary dependencies for implementing the `download_checkpoint` function. Write a Python function `def download_checkpoint( link: str, model_type: Literal["Checkpoint", "TextualInversion", "LORA", "VAE"] ) -> str` to solve the following problem:
Download a model from a link and return the path to the downloaded file.
Here is the function:
def download_checkpoint(
    link: str, model_type: Literal["Checkpoint", "TextualInversion", "LORA", "VAE"]
) -> str:
    "Download a model from a link and return the path to the downloaded file."
    # Map the (case-insensitive) model type onto its storage folder
    folders = {
        "checkpoint": "models",
        "textualinversion": "textual-inversion",
        "lora": "lora",
        "vae": "vae",
    }
    mtype = model_type.casefold()
    if mtype not in folders:
        raise ValueError(f"Unknown model type {mtype}")

    saved_path = download_file(link, Path("data") / folders[mtype], True).as_posix()
    websocket_manager.broadcast_sync(Data(data_type="refresh_models", data={}))
    return saved_path
156,335 | import logging
import os
from fastapi import APIRouter
from core import config
from core.config._config import update_config
logger = logging.getLogger(__name__)
def update_config(config: Configuration, new_config: Configuration):
"Update the configuration with new values instead of overwriting the pointer"
for cls_field in fields(new_config):
assert isinstance(cls_field, Field)
setattr(config, cls_field.name, getattr(new_config, cls_field.name))
The provided code snippet includes necessary dependencies for implementing the `save_configuration` function. Write a Python function `def save_configuration(settings: config.Configuration)` to solve the following problem:
Update settings and save them to the config file
Here is the function:
def save_configuration(settings: config.Configuration):
    "Update settings and save them to the config file"
    current = config.config
    reload_required = False

    if current.api.device != settings.api.device:
        logger.info(f"Device was changed to {settings.api.device}")
        reload_required = True
    if current.api.data_type != settings.api.data_type:
        logger.info(f"Precision changed to {settings.api.data_type}")
        reload_required = True
    # Any other API-level difference also warrants a model reload
    if current.api != settings.api:
        reload_required = True

    if reload_required:
        logger.info(
            "API settings changed, you might need to reload your models for these changes to take effect"
        )

    # Update in place (callers hold a reference to config.config), then persist
    update_config(config.config, settings)
    config.save_config(config.config)
    logger.info("Config was updated and saved to disk")
    return {"message": "success"}
156,336 | import logging
import os
from fastapi import APIRouter
from core import config
from core.config._config import update_config
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `get_configuration` function. Write a Python function `def get_configuration()` to solve the following problem:
Get current settings
Here is the function:
def get_configuration():
    "Get current settings"
    # Log the full config at debug level for troubleshooting
    logger.debug(f"Sending configuration to frontend: {config.config}")
    return config.config
156,337 | import logging
import os
from fastapi import APIRouter
from core import config
from core.config._config import update_config
def inject_var_into_dotenv(key: str, value: str) -> None:
"""
Injects the HuggingFace token into the .env file
Args:
token (str): HuggingFace token with read permissions
"""
pattern = re.compile(f"{key}=(.*)")
dotenv_path = Path(".env")
if key == "HUGGINGFACE_TOKEN":
# Check if the token is valid
from huggingface_hub import HfApi
api = HfApi()
try:
api.whoami(token=value)
except HTTPError as e:
logger.error(f"Invalid HuggingFace token: {e}")
if not dotenv_path.exists():
example_dotenv = open(".env.example", "r", encoding="utf-8")
example_dotenv_contents = example_dotenv.read()
example_dotenv.close()
with dotenv_path.open("w", encoding="utf-8") as f:
f.write(example_dotenv_contents)
with dotenv_path.open("r", encoding="utf-8") as f:
dotenv_contents = f.read()
dotenv_contents = pattern.sub(f"{key}={value}", dotenv_contents)
with dotenv_path.open("w", encoding="utf-8") as f:
f.write(dotenv_contents)
logger.info(f"{key} was injected to the .env file")
os.environ[key] = value
logger.info("Variable injected into current environment")
The provided code snippet includes necessary dependencies for implementing the `set_hf_token` function. Write a Python function `def set_hf_token(key: str, value: str)` to solve the following problem:
Set the HuggingFace token in the environment variables and in the .env file
Here is the function:
def set_hf_token(key: str, value: str):
    "Set the HuggingFace token in the environment variables and in the .env file"
    # Imported lazily to avoid a circular import at module load time
    from core.functions import inject_var_into_dotenv

    # Persists to .env and injects into the running process environment
    inject_var_into_dotenv(key, value)
    return {"message": "success"}
156,338 | import logging
import os
from fastapi import APIRouter
from core import config
from core.config._config import update_config
The provided code snippet includes necessary dependencies for implementing the `hf_whoami` function. Write a Python function `def hf_whoami()` to solve the following problem:
Return the current HuggingFace user
Here is the function:
def hf_whoami():
    "Return the current HuggingFace user"
    from huggingface_hub import HfApi

    # Token is read from the environment (set via set_hf_token / .env)
    return HfApi().whoami(token=os.getenv("HUGGINGFACE_TOKEN"))
156,339 | from typing import List
import torch
from fastapi import APIRouter, HTTPException
from core.shared import all_gpus, amd
def gpus():
"List all available GPUs"
devices = {}
for i in gpu_ids():
if amd:
data = all_gpus[i]
name = data.name
total_memory = data.memory_info["vram_size"] # type: ignore
major = 8
minor = 1
multi_processor_count = 1000
else:
from torch._C import _CudaDeviceProperties
data: _CudaDeviceProperties = torch.cuda.get_device_properties(i)
name = data.name
total_memory = data.total_memory
major = data.major
minor = data.minor
multi_processor_count = data.multi_processor_count
devices[i] = {
"name": name,
"total_memory": str(round(total_memory / 1024**3)) + "GB",
"major": major,
"minor": minor,
"multi_processor_count": multi_processor_count,
}
return devices
amd: bool = False
all_gpus: List = []
The provided code snippet includes necessary dependencies for implementing the `gpu_memory` function. Write a Python function `def gpu_memory(gpu_id: int)` to solve the following problem:
Return the memory statistics of the GPU
Here is the function:
def gpu_memory(gpu_id: int):
    "Return the memory statistics of the GPU"
    if amd:
        # AMD path: query the cached GPU handle directly (values in bytes)
        amdgpu = all_gpus[gpu_id]
        total = amdgpu.memory_info["vram_size"]
        free = total - amdgpu.query_vram_usage()
        return (total, free, "b")

    # NVIDIA path: gpustat reports totals/free in megabytes
    from gpustat.core import GPUStatCollection

    try:
        gpu_data = GPUStatCollection.new_query().gpus[gpu_id]
    except IndexError:
        raise HTTPException(status_code=400, detail="GPU not found")
    return (gpu_data.memory_total, gpu_data.memory_free, "MB")
156,340 | from typing import List
import torch
from fastapi import APIRouter, HTTPException
from core.shared import all_gpus, amd
def gpu(gpu_id: int) -> str:
    """Return the human-readable name of the GPU with the given id."""
    if not amd:
        return torch.cuda.get_device_name(gpu_id)
    return all_gpus[gpu_id].name
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `capabilities` function. Write a Python function `def capabilities()` to solve the following problem:
List of all the capabilities of this system
Here is the function:
def capabilities():
    """List all the capabilities of this system."""
    from core.shared_dependent import gpu as shared_gpu

    return shared_gpu.capabilities
156,341 | from fastapi import APIRouter
from fastapi.responses import FileResponse
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Main page
Here is the function:
def index():
    """Serve the built frontend's main page."""
    frontend_entry = "frontend/dist/index.html"
    return FileResponse(frontend_entry)
156,342 | from fastapi import APIRouter
from fastapi.responses import FileResponse
The provided code snippet includes necessary dependencies for implementing the `favicon` function. Write a Python function `def favicon()` to solve the following problem:
Icon of the app
Here is the function:
def favicon():
    """Serve the application icon."""
    icon_path = "frontend/dist/favicon.ico"
    return FileResponse(icon_path)
156,343 | import logging
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List
from fastapi import APIRouter, HTTPException
from core.functions import image_meta_from_file
valid_extensions = ["png", "jpeg", "webp"]
def sort_images(images: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Return *images* ordered newest-first by their ``time`` field."""
    by_time_desc = sorted(images, key=lambda entry: entry["time"], reverse=True)
    return by_time_desc
The provided code snippet includes necessary dependencies for implementing the `txt2img` function. Write a Python function `def txt2img() -> List[Dict[str, Any]]` to solve the following problem:
List all generated images
Here is the function:
def txt2img() -> List[Dict[str, Any]]:
    """List all generated txt2img images, newest first.

    Scans ``data/outputs/txt2img`` recursively for every supported image
    extension and returns records of ``{"path", "time" (mtime), "id" (stem)}``.
    Returns an empty list when the output folder does not exist yet.
    """
    path = Path("data/outputs/txt2img")
    if not path.exists():
        return []

    data: List[Dict[str, Any]] = []
    for extension in valid_extensions:
        # rglob already searches recursively - no need for an explicit "**/".
        for file in path.rglob(f"*.{extension}"):
            data.append(
                {"path": file.as_posix(), "time": os.path.getmtime(file), "id": file.stem}
            )

    return sort_images(data)
156,344 | import logging
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List
from fastapi import APIRouter, HTTPException
from core.functions import image_meta_from_file
valid_extensions = ["png", "jpeg", "webp"]
def sort_images(images: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"Sort images by time"
return sorted(images, key=lambda x: x["time"], reverse=True)
The provided code snippet includes necessary dependencies for implementing the `img2img` function. Write a Python function `def img2img() -> List[Dict[str, Any]]` to solve the following problem:
List all generated images
Here is the function:
def img2img() -> List[Dict[str, Any]]:
    """List all generated img2img images, newest first.

    Scans ``data/outputs/img2img`` recursively for every supported image
    extension and returns records of ``{"path", "time" (mtime), "id" (stem)}``.
    Returns an empty list when the output folder does not exist yet.
    """
    path = Path("data/outputs/img2img")
    if not path.exists():
        return []

    data: List[Dict[str, Any]] = []
    for extension in valid_extensions:
        # rglob already searches recursively - no need for an explicit "**/".
        for file in path.rglob(f"*.{extension}"):
            data.append(
                {"path": file.as_posix(), "time": os.path.getmtime(file), "id": file.stem}
            )

    return sort_images(data)
156,345 | import logging
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List
from fastapi import APIRouter, HTTPException
from core.functions import image_meta_from_file
valid_extensions = ["png", "jpeg", "webp"]
def sort_images(images: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"Sort images by time"
return sorted(images, key=lambda x: x["time"], reverse=True)
The provided code snippet includes necessary dependencies for implementing the `extra` function. Write a Python function `def extra() -> List[Dict[str, Any]]` to solve the following problem:
List all generated images
Here is the function:
def extra() -> List[Dict[str, Any]]:
    """List all generated "extra" (postprocessing) images, newest first.

    Scans ``data/outputs/extra`` recursively for every supported image
    extension and returns records of ``{"path", "time" (mtime), "id" (stem)}``.
    Returns an empty list when the output folder does not exist yet.
    """
    path = Path("data/outputs/extra")
    if not path.exists():
        return []

    data: List[Dict[str, Any]] = []
    for extension in valid_extensions:
        # rglob already searches recursively - no need for an explicit "**/".
        for file in path.rglob(f"*.{extension}"):
            data.append(
                {"path": file.as_posix(), "time": os.path.getmtime(file), "id": file.stem}
            )

    return sort_images(data)
156,346 | import logging
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List
from fastapi import APIRouter, HTTPException
from core.functions import image_meta_from_file
def image_meta_from_file(path: Path) -> Dict[str, str]:
    """Return generation metadata embedded in an image file.

    PNG files expose their tEXt chunks through PIL's ``Image.text``; any
    other format is assumed to carry a JSON blob in the EXIF ``UserComment``
    tag, parsed via piexif.  Returns an empty dict when the EXIF payload
    cannot be decoded.
    """
    extension = path.suffix.lower()
    if extension == ".png":
        with path.open("rb") as f:
            image = Image.open(f)
            # PIL surfaces PNG tEXt metadata chunks as a dict on ``.text``.
            meta = image.text  # type: ignore
            return meta
    else:
        try:
            data = piexif.load(path.as_posix())
            # UserComment holds a JSON-encoded dict of generation settings.
            meta: Dict[str, str] = json.loads(
                piexif.helper.UserComment.load(data["Exif"][piexif.ExifIFD.UserComment])
            )
            return meta
        except ValueError as e:
            # Corrupt or missing EXIF payload: log and treat as "no metadata".
            # NOTE(review): a missing "Exif" key would raise KeyError, which is
            # not caught here - confirm piexif always provides the key.
            logger.warning(f"Error while loading metadata from {path}: {e}")
            return {}
The provided code snippet includes necessary dependencies for implementing the `image_data` function. Write a Python function `def image_data(filename: str) -> Dict[str, str]` to solve the following problem:
Get a generated image metadata
Here is the function:
def image_data(filename: str) -> Dict[str, str]:
    """Get the metadata stored inside a generated image.

    Access is restricted to files under ``data/outputs``.  The requested
    path is fully resolved before checking, so ``..`` segments and symlinks
    cannot be used to escape the directory.

    Raises:
        HTTPException: 403 when the path lies outside ``data/outputs``,
            404 when the file does not exist.
    """
    path = Path(filename)
    # CodeQL: Path Traversal fix - compare *resolved* paths.  A plain string
    # prefix check passes inputs such as "data/outputs/../../secret".
    base = Path("data/outputs").resolve()
    if not path.resolve().is_relative_to(base):
        raise HTTPException(status_code=403, detail="Access denied")
    if not path.exists():
        raise HTTPException(status_code=404, detail="File not found")

    return image_meta_from_file(path)
156,347 | import logging
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List
from fastapi import APIRouter, HTTPException
from core.functions import image_meta_from_file
The provided code snippet includes necessary dependencies for implementing the `delete_image` function. Write a Python function `def delete_image(filename: str) -> Dict[str, str]` to solve the following problem:
Delete a generated image (does not purge the directory)
Here is the function:
def delete_image(filename: str) -> Dict[str, str]:
    """Delete a generated image (does not purge the directory).

    Deletion is restricted to files under ``data/outputs``.  The requested
    path is fully resolved before checking, so ``..`` segments and symlinks
    cannot be used to delete files elsewhere on disk.

    Raises:
        HTTPException: 403 when the path lies outside ``data/outputs``,
            404 when the file does not exist.
    """
    path = Path(filename)
    # CodeQL: Path Traversal fix - compare *resolved* paths.  A plain string
    # prefix check passes inputs such as "data/outputs/../../secret".
    base = Path("data/outputs").resolve()
    if not path.resolve().is_relative_to(base):
        raise HTTPException(status_code=403, detail="Access denied")
    if not path.exists():
        raise HTTPException(status_code=404, detail="File not found")

    path.unlink()
    return {"message": "File deleted"}
156,348 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
class ModelNotLoadedError(Exception):
"Raised when the model is blocked from being loaded automatically"
def images_to_response(images: Union[List[Image.Image], List[str]], time: float):
    """Generate a valid API response for a batch of generated images.

    - No images: response with an empty list.
    - String images: passed through untouched.
    - ``bytes`` return format: a single raw-bytes Response (valid only for
      exactly one image; more is a 500 error).
    - Otherwise: images are base64 encoded.
    """
    if len(images) == 0:
        return {
            "time": time,
            "images": [],
        }
    elif isinstance(images[0], str):
        return {
            "time": time,
            "images": images,
        }
    elif config.api.image_return_format == "bytes":
        if len(images) > 1:
            raise HTTPException(
                status_code=500,
                detail={
                    # Fix: this literal was missing its f-prefix, so the raw
                    # "{len(images)}" placeholder was sent to the client.
                    "message": f"Image return format is set to bytes, but {len(images)} images were returned"
                },
            )

        return Response(img_to_bytes(images[0]), media_type="binary/octet-stream")  # type: ignore
    else:
        return {
            "time": time,
            "images": [convert_image_to_base64(i) for i in images],  # type: ignore
        }
gpu = GPU()
class Txt2ImgQueueEntry(Job):
"Dataclass for a text to image queue entry"
data: Txt2imgData
The provided code snippet includes necessary dependencies for implementing the `txt2img_job` function. Write a Python function `def txt2img_job(job: Txt2ImgQueueEntry)` to solve the following problem:
Generate images from text
Here is the function:
def txt2img_job(job: Txt2ImgQueueEntry):
    """Generate images from a text prompt."""
    try:
        images: Union[List[Image.Image], List[str]]
        duration: float
        images, duration = gpu.generate(job)
    except ModelNotLoadedError:
        raise HTTPException(status_code=400, detail="Model is not loaded")

    return images_to_response(images, duration)
156,349 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
class ModelNotLoadedError(Exception):
"Raised when the model is blocked from being loaded automatically"
def images_to_response(images: Union[List[Image.Image], List[str]], time: float):
"Generate a valid response for the API"
if len(images) == 0:
return {
"time": time,
"images": [],
}
elif isinstance(images[0], str):
return {
"time": time,
"images": images,
}
elif config.api.image_return_format == "bytes":
if len(images) > 1:
raise HTTPException(
status_code=500,
detail={
"message": "Image return format is set to bytes, but {len(images)} images were returned"
},
)
return Response(img_to_bytes(images[0]), media_type="binary/octet-stream") # type: ignore
else:
return {
"time": time,
"images": [convert_image_to_base64(i) for i in images], # type: ignore
}
gpu = GPU()
class Img2ImgQueueEntry(Job):
"Dataclass for an image to image queue entry"
data: Img2imgData
def convert_bytes_to_image_stream(data: bytes) -> str:
"Convert a base64 string to a PIL Image"
pattern = re.compile(r"data:image\/[\w]+;base64,")
img = data
img = img.decode("utf-8")
img = re.sub(pattern, "", img)
return img
The provided code snippet includes necessary dependencies for implementing the `img2img_job` function. Write a Python function `def img2img_job(job: Img2ImgQueueEntry)` to solve the following problem:
Modify image with prompt
Here is the function:
def img2img_job(job: Img2ImgQueueEntry):
    """Modify an input image according to the prompt."""
    raw = job.data.image
    assert isinstance(raw, bytes)
    # Incoming image arrives as data-URL bytes; strip to bare base64.
    job.data.image = convert_bytes_to_image_stream(raw)

    try:
        images: Union[List[Image.Image], List[str]]
        duration: float
        images, duration = gpu.generate(job)
    except ModelNotLoadedError:
        raise HTTPException(status_code=400, detail="Model is not loaded")

    return images_to_response(images, duration)
156,350 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
class ModelNotLoadedError(Exception):
"Raised when the model is blocked from being loaded automatically"
def images_to_response(images: Union[List[Image.Image], List[str]], time: float):
"Generate a valid response for the API"
if len(images) == 0:
return {
"time": time,
"images": [],
}
elif isinstance(images[0], str):
return {
"time": time,
"images": images,
}
elif config.api.image_return_format == "bytes":
if len(images) > 1:
raise HTTPException(
status_code=500,
detail={
"message": "Image return format is set to bytes, but {len(images)} images were returned"
},
)
return Response(img_to_bytes(images[0]), media_type="binary/octet-stream") # type: ignore
else:
return {
"time": time,
"images": [convert_image_to_base64(i) for i in images], # type: ignore
}
gpu = GPU()
class InpaintQueueEntry(Job):
"Dataclass for an image to image queue entry"
data: InpaintData
def convert_bytes_to_image_stream(data: bytes) -> str:
"Convert a base64 string to a PIL Image"
pattern = re.compile(r"data:image\/[\w]+;base64,")
img = data
img = img.decode("utf-8")
img = re.sub(pattern, "", img)
return img
The provided code snippet includes necessary dependencies for implementing the `inpaint_job` function. Write a Python function `def inpaint_job(job: InpaintQueueEntry)` to solve the following problem:
Inpaint image with prompt
Here is the function:
def inpaint_job(job: InpaintQueueEntry):
    """Inpaint the masked region of an image according to the prompt."""
    # Both the source image and its mask arrive as data-URL bytes.
    raw_image = job.data.image
    assert isinstance(raw_image, bytes)
    job.data.image = convert_bytes_to_image_stream(raw_image)

    raw_mask = job.data.mask_image
    assert isinstance(raw_mask, bytes)
    job.data.mask_image = convert_bytes_to_image_stream(raw_mask)

    try:
        images: Union[List[Image.Image], List[str]]
        duration: float
        images, duration = gpu.generate(job)
    except ModelNotLoadedError:
        raise HTTPException(status_code=400, detail="Model is not loaded")

    return images_to_response(images, duration)
156,351 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
class ModelNotLoadedError(Exception):
"Raised when the model is blocked from being loaded automatically"
def images_to_response(images: Union[List[Image.Image], List[str]], time: float):
"Generate a valid response for the API"
if len(images) == 0:
return {
"time": time,
"images": [],
}
elif isinstance(images[0], str):
return {
"time": time,
"images": images,
}
elif config.api.image_return_format == "bytes":
if len(images) > 1:
raise HTTPException(
status_code=500,
detail={
"message": "Image return format is set to bytes, but {len(images)} images were returned"
},
)
return Response(img_to_bytes(images[0]), media_type="binary/octet-stream") # type: ignore
else:
return {
"time": time,
"images": [convert_image_to_base64(i) for i in images], # type: ignore
}
gpu = GPU()
class ControlNetQueueEntry(Job):
"Dataclass for a control net queue entry"
data: ControlNetData
def convert_bytes_to_image_stream(data: bytes) -> str:
"Convert a base64 string to a PIL Image"
pattern = re.compile(r"data:image\/[\w]+;base64,")
img = data
img = img.decode("utf-8")
img = re.sub(pattern, "", img)
return img
The provided code snippet includes necessary dependencies for implementing the `controlnet_job` function. Write a Python function `def controlnet_job(job: ControlNetQueueEntry)` to solve the following problem:
Generate images based on a reference image
Here is the function:
def controlnet_job(job: ControlNetQueueEntry):
    """Generate images conditioned on a reference image (ControlNet)."""
    raw = job.data.image
    assert isinstance(raw, bytes)
    # Reference image arrives as data-URL bytes; strip to bare base64.
    job.data.image = convert_bytes_to_image_stream(raw)

    try:
        images: Union[List[Image.Image], List[str]]
        duration: float
        images, duration = gpu.generate(job)
    except ModelNotLoadedError:
        raise HTTPException(status_code=400, detail="Model is not loaded")

    return images_to_response(images, duration)
156,352 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
config = load_config()
class ModelNotLoadedError(Exception):
"Raised when the model is blocked from being loaded automatically"
def img_to_bytes(img: Image.Image) -> bytes:
"Convert an image to bytes"
with BytesIO() as output:
img.save(output, format=config.api.image_extension)
return output.getvalue()
gpu = GPU()
class UpscaleQueueEntry(Job):
"Dataclass for a real esrgan job"
data: UpscaleData
def convert_image_to_base64(
image: Image.Image,
quality: int = 95,
image_format: ImageFormats = "webp",
prefix_js: bool = True,
) -> str:
"Convert an image to a base64 string"
stream = convert_image_to_stream(image, quality=quality)
if prefix_js:
prefix = f"data:image/{image_format};base64,"
else:
prefix = ""
return prefix + base64.b64encode(stream.read()).decode("utf-8")
def convert_bytes_to_image_stream(data: bytes) -> str:
"Convert a base64 string to a PIL Image"
pattern = re.compile(r"data:image\/[\w]+;base64,")
img = data
img = img.decode("utf-8")
img = re.sub(pattern, "", img)
return img
The provided code snippet includes necessary dependencies for implementing the `realesrgan_upscale_job` function. Write a Python function `def realesrgan_upscale_job(job: UpscaleQueueEntry)` to solve the following problem:
Upscale image with RealESRGAN model
Here is the function:
def realesrgan_upscale_job(job: UpscaleQueueEntry):
    """Upscale an image with a RealESRGAN model."""
    raw = job.data.image
    assert isinstance(raw, bytes)
    job.data.image = convert_bytes_to_image_stream(raw)

    try:
        image: Image.Image
        duration: float
        image, duration = gpu.upscale(job)
    except ModelNotLoadedError:
        raise HTTPException(status_code=400, detail="Model is not loaded")

    # Serialize according to the configured return format.
    if config.api.image_return_format == "base64":
        payload = convert_image_to_base64(image)
    else:
        payload = img_to_bytes(image)
    return {"time": duration, "images": payload}
156,353 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
gpu = GPU()
class AITemplateBuildRequest:
"Dataclass for requesting a build of an engine"
model_id: str
width: int = field(default=512)
height: int = field(default=512)
batch_size: int = field(default=1)
threads: Optional[int] = field(default=None)
The provided code snippet includes necessary dependencies for implementing the `generate_aitemplate` function. Write a Python function `def generate_aitemplate(request: AITemplateBuildRequest)` to solve the following problem:
Generate an AITemplate model from a local model
Here is the function:
def generate_aitemplate(request: AITemplateBuildRequest):
    """Generate an AITemplate engine from a local model.

    Invokes the GPU worker's build and reports success once the call returns.
    """
    gpu.build_aitemplate_engine(request)

    return {"message": "Success"}
156,354 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
gpu = GPU()
class AITemplateDynamicBuildRequest:
"Dataclass for requesting a build of an engine"
model_id: str
width: Tuple[int, int] = field(default=(64, 2048))
height: Tuple[int, int] = field(default=(64, 2048))
batch_size: Tuple[int, int] = field(default=(1, 4))
clip_chunks: int = field(default=6)
threads: Optional[int] = field(default=None)
The provided code snippet includes necessary dependencies for implementing the `generate_dynamic_aitemplate` function. Write a Python function `def generate_dynamic_aitemplate(request: AITemplateDynamicBuildRequest)` to solve the following problem:
Generate an AITemplate engine from a local model
Here is the function:
def generate_dynamic_aitemplate(request: AITemplateDynamicBuildRequest):
    """Generate a dynamic-shape AITemplate engine from a local model.

    Invokes the GPU worker's build and reports success once the call returns.
    """
    gpu.build_dynamic_aitemplate_engine(request)

    return {"message": "Success"}
156,355 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
gpu = GPU()
class ONNXBuildRequest:
"Dataclass for requesting a build of an ONNX engine"
model_id: str
simplify_unet: bool = False
convert_to_fp16: bool = False
quant_dict: QuantizationDict = field(default_factory=QuantizationDict)
The provided code snippet includes necessary dependencies for implementing the `generate_onnx` function. Write a Python function `def generate_onnx(request: ONNXBuildRequest)` to solve the following problem:
Generate an ONNX model from a local model
Here is the function:
def generate_onnx(request: ONNXBuildRequest):
    """Generate an ONNX model from a local model.

    Invokes the GPU worker's build and reports success once the call returns.
    """
    gpu.build_onnx_engine(request)

    return {"message": "Success"}
156,356 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
gpu = GPU()
class ConvertModelRequest:
"Dataclass for requesting a conversion of a model"
model: str
safetensors: bool = False
The provided code snippet includes necessary dependencies for implementing the `convert_model` function. Write a Python function `def convert_model(request: ConvertModelRequest)` to solve the following problem:
Convert a Stable Diffusion model
Here is the function:
def convert_model(request: ConvertModelRequest):
    """Convert a Stable Diffusion model (optionally to safetensors format).

    Invokes the GPU worker's converter and reports success once it returns.
    """
    gpu.convert_model(model=request.model, safetensors=request.safetensors)

    return {"message": "Success"}
156,357 | import logging
from typing import List, Union
from fastapi import APIRouter, HTTPException
from PIL import Image
from core.config import config
from core.errors import ModelNotLoadedError
from core.functions import images_to_response, img_to_bytes
from core.shared_dependent import gpu
from core.types import (
AITemplateBuildRequest,
AITemplateDynamicBuildRequest,
ControlNetQueueEntry,
ConvertModelRequest,
Img2ImgQueueEntry,
InpaintQueueEntry,
InterrogatorQueueEntry,
ONNXBuildRequest,
Txt2ImgQueueEntry,
UpscaleQueueEntry,
)
from core.utils import convert_bytes_to_image_stream, convert_image_to_base64
gpu = GPU()
class InterrogatorQueueEntry(Job):
"Dataclass for an interrogation queue entry"
data: InterrogationData
def convert_bytes_to_image_stream(data: bytes) -> str:
"Convert a base64 string to a PIL Image"
pattern = re.compile(r"data:image\/[\w]+;base64,")
img = data
img = img.decode("utf-8")
img = re.sub(pattern, "", img)
return img
The provided code snippet includes necessary dependencies for implementing the `interrogate` function. Write a Python function `def interrogate(request: InterrogatorQueueEntry)` to solve the following problem:
Interrogate a model
Here is the function:
def interrogate(request: InterrogatorQueueEntry):
    """Run an interrogation job on the supplied image."""
    raw = request.data.image
    assert isinstance(raw, bytes)
    # Image arrives as data-URL bytes; strip to bare base64 before queuing.
    request.data.image = convert_bytes_to_image_stream(raw)
    return gpu.interrogate(request)
156,358 | import logging
from pathlib import Path
from typing import List
from fastapi import APIRouter
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `get_autofill_list` function. Write a Python function `def get_autofill_list() -> List[str]` to solve the following problem:
Gathers and returns all words from the prompt autofill files
Here is the function:
def get_autofill_list() -> List[str]:
    """Gather and return all unique words from the prompt autofill files.

    Reads every ``*.txt`` file in ``data/autofill`` (one entry per line) and
    returns the deduplicated union.  Fix: previously this crashed with
    FileNotFoundError when the folder did not exist (the debug log forced an
    ``iterdir()`` before any existence check); now it returns an empty list.
    """
    log = logging.getLogger(__name__)
    autofill_folder = Path("data/autofill")

    # A missing folder is not an error - there is simply nothing to suggest.
    if not autofill_folder.is_dir():
        log.debug(f"Autofill folder {autofill_folder} does not exist")
        return []

    words = set()
    for file in autofill_folder.glob("*.txt"):
        if file.is_file():
            log.debug(f"Found autofill file: {file}")
            words.update(file.read_text(encoding="utf-8").splitlines())

    return list(words)
156,359 | import logging
import sys
from pathlib import Path
from fastapi import APIRouter
from api import websocket_manager
from api.websockets.notification import Notification
from core import shared
The provided code snippet includes necessary dependencies for implementing the `interrupt` function. Write a Python function `def interrupt()` to solve the following problem:
Interrupt the current job
Here is the function:
def interrupt():
    """Interrupt the current job by raising the shared interrupt flag.

    The worker polls ``shared.interrupt`` and stops at its next checkpoint.
    """
    shared.interrupt = True
    # Fix: message previously misspelled as "Interupted".
    return {"message": "Interrupted"}
156,360 | import logging
import sys
from pathlib import Path
from fastapi import APIRouter
from api import websocket_manager
from api.websockets.notification import Notification
from core import shared
logger = logging.getLogger(__name__)
websocket_manager = WebSocketManager()
class Notification(Data):
"Notification to be sent to the client"
def __init__(
self,
severity: Literal["info", "warning", "error", "success"] = "info",
title: str = "",
message: str = "",
timeout: int = 10_000,
) -> None:
self.severity = severity
self.title = title
self.message = message
self.timeout = timeout
super().__init__(
data={
"severity": self.severity,
"title": self.title,
"message": self.message,
"timeout": self.timeout,
},
data_type="notification",
)
uvicorn_server: Optional["Server"] = None
uvicorn_loop: Optional[asyncio.AbstractEventLoop] = None
config = load_config()
The provided code snippet includes necessary dependencies for implementing the `shutdown` function. Write a Python function `def shutdown()` to solve the following problem:
Shutdown the server
Here is the function:
def shutdown():
    """Shut down the server if ``config.api.enable_shutdown`` allows it.

    Notifies connected clients, cancels background tasks, force-exits the
    uvicorn server and stops its event loop, then exits the process.
    When shutdown is disabled, clients get an error notification instead.
    """
    from core.config import config
    from core.shared import uvicorn_loop, uvicorn_server

    if config.api.enable_shutdown:
        if uvicorn_server is not None:
            # Warn connected clients before tearing anything down.
            websocket_manager.broadcast_sync(
                data=Notification(
                    message="Shutting down the server",
                    severity="warning",
                    title="Shutdown",
                )
            )
            # Cancel all tracked background asyncio tasks first.
            for task in shared.asyncio_tasks:
                task.cancel()
            # force_exit makes uvicorn skip the graceful connection drain.
            uvicorn_server.force_exit = True
            logger.debug("Setting force_exit to True")
        assert uvicorn_server is not None
        assert uvicorn_loop is not None
        # NOTE(review): uvicorn's Server.shutdown is a coroutine function;
        # run_in_executor only calls it (creating a coroutine object) rather
        # than awaiting it - confirm this is intentional, since force_exit
        # plus loop.stop() below may be what actually terminates the server.
        uvicorn_loop.run_in_executor(None, uvicorn_server.shutdown)
        logger.debug("Unicorn server shutdown")  # "Unicorn" is presumably a typo for "uvicorn"
        uvicorn_loop.stop()
        logger.debug("Unicorn loop stopped")
        sys.exit(0)
    else:
        websocket_manager.broadcast_sync(
            data=Notification(
                message="Shutdown is disabled", severity="error", title="Shutdown"
            )
        )
        return {"message": "Shutdown is disabled"}
156,361 | import logging
import sys
from pathlib import Path
from fastapi import APIRouter
from api import websocket_manager
from api.websockets.notification import Notification
from core import shared
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `queue_status` function. Write a Python function `def queue_status()` to solve the following problem:
Get the status of the queue
Here is the function:
def queue_status():
    """Get the current contents of the job queue."""
    from core.shared_dependent import gpu

    return {"jobs": gpu.queue.jobs}
156,362 | import logging
import sys
from pathlib import Path
from fastapi import APIRouter
from api import websocket_manager
from api.websockets.notification import Notification
from core import shared
gpu = GPU()
The provided code snippet includes necessary dependencies for implementing the `queue_clear` function. Write a Python function `def queue_clear()` to solve the following problem:
Clear the queue
Here is the function:
def queue_clear():
    """Remove all pending jobs from the queue."""
    from core.shared_dependent import gpu

    gpu.queue.clear()
    return {"message": "Queue cleared"}
156,363 | import logging
import sys
from pathlib import Path
from fastapi import APIRouter
from api import websocket_manager
from api.websockets.notification import Notification
from core import shared
The provided code snippet includes necessary dependencies for implementing the `themes` function. Write a Python function `def themes()` to solve the following problem:
Get all available themes
Here is the function:
def themes():
    """Get the names of all available themes, sorted alphabetically."""
    theme_dir = Path("data/themes")
    return sorted(theme.stem for theme in theme_dir.glob("*.json"))
156,364 | import asyncio
import logging
import os
import shlex
import subprocess
import sys
import threading
import warnings
from argparse import ArgumentParser
from datetime import datetime, timedelta
from pathlib import Path
from core.install_requirements import (
check_valid_python_version,
commit_hash,
create_environment,
in_virtualenv,
install_deps,
is_installed,
version_check,
)
# Bootstrap a .env file from the bundled example on first run so that
# dotenv.load_dotenv() later in checks() has something to read.
if not Path(".env").exists():
    with open(".env", "w") as f_out:
        with open("example.env", "r") as f_in:
            f_out.write(f_in.read())

# Under test (TESTING=1) ignore the real command line so argparse sees nothing.
app_args = [] if os.getenv("TESTING") == "1" else sys.argv[1:]

parser = ArgumentParser(
    prog="VoltaML Fast Stable Diffusion",
    epilog="""
VoltaML Fast Stable Diffusion - Accelerated Stable Diffusion inference
Copyright (C) 2023-present Stax124
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
""",
)
parser.add_argument(
    "--log-level",
    help="Log level",
    choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
)
parser.add_argument("--ngrok", action="store_true", help="Use ngrok to expose the API")
parser.add_argument("--host", action="store_true", help="Expose the API to the network")
parser.add_argument("--in-container", action="store_true", help="Skip virtualenv check")
parser.add_argument(
    "--pytorch-type",
    help="Force voltaml to use a specific type of pytorch distribution.",
    choices=["cpu", "cuda", "rocm", "directml", "intel", "vulkan"],
)
parser.add_argument(
    "--bot", action="store_true", help="Run in tandem with the Discord bot"
)
parser.add_argument(
    "--enable-r2",
    action="store_true",
    help="Enable Cloudflare R2 bucket upload support",
)
parser.add_argument(
    "-p", "--port", type=int, help="Port to expose the API on", default=5003
)
parser.add_argument(
    "--install-only",
    action="store_true",
    help="Only install requirements and exit",
)
# Parsed a second time inside checks() after EXTRA_ARGS env injection.
args = parser.parse_args(args=app_args)

logger: logging.Logger = logging.getLogger()
# Quieten noisy third-party loggers so DEBUG-level runs stay readable.
logging.getLogger("PIL.PngImagePlugin").setLevel(logging.INFO)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
logging.getLogger("PIL.Image").setLevel(logging.INFO)
logging.getLogger("uvicorn.error").setLevel(logging.INFO)
def cleanup_old_logs():
    """Delete log files in ``data/logs`` that are older than seven days.

    The age cutoff is computed once before the loop instead of once per
    file, and ``missing_ok=True`` tolerates a file disappearing between
    the directory scan and the unlink.
    """
    cutoff = datetime.now() - timedelta(days=7)
    for log_file in Path("data/logs").glob("*.log"):
        if datetime.fromtimestamp(log_file.stat().st_mtime) < cutoff:
            log_file.unlink(missing_ok=True)
def is_root():
    """Return True when the process runs with elevated privileges."""
    if hasattr(os, "getuid"):
        # POSIX: uid 0 is root.
        return os.getuid() == 0  # type: ignore
    # Windows: os.getuid does not exist, ask the shell API instead.
    import ctypes

    return ctypes.windll.shell32.IsUserAnAdmin() != 0  # type: ignore
def install_deps(force_distribution: Union[int, str] = -1):
    """
    Install necessary requirements for inference.

    Args:
        force_distribution: Index into ``_pytorch_distributions`` (-1 means
            autodetect) or a distribution name such as "cuda" or "rocm".
            # assumes _pytorch_distributions entries expose .name,
            # .check_command, .install_command, .success_message and
            # .windows_supported - defined elsewhere in this file.
    """
    # Install pytorch - only when torch/torchvision are missing.
    if not is_installed("torch") or not is_installed("torchvision"):
        if isinstance(force_distribution, int):
            # Out-of-range index (including the default -1) falls through
            # to autodetection below.
            forced_distribution = (
                _pytorch_distributions[force_distribution]
                if -1 < force_distribution < len(_pytorch_distributions)
                else None
            )
        else:
            # Name lookup; an unknown / platform-incompatible name raises
            # IndexError here.
            forced_distribution = [
                x
                for x in _pytorch_distributions
                if x.name == force_distribution.lower()
                and (x.windows_supported if platform.system() == "Windows" else True)
            ][0]
        logger.info("Installing PyTorch")
        if platform.system() == "Darwin":
            # macOS gets a pinned plain wheel regardless of forcing.
            subprocess.check_call(
                [sys.executable, "-m", "pip", "install", "torch==2.1.0", "torchvision"]
            )
        else:
            if forced_distribution is not None:
                # User forced a specific distribution
                logger.info(forced_distribution.success_message)
                # install_command may be a single command or a list of them.
                if isinstance(forced_distribution.install_command[0], list):
                    for cmd in forced_distribution.install_command:
                        subprocess.check_call(cmd)
                else:
                    subprocess.check_call(forced_distribution.install_command)  # type: ignore
            else:
                # Automatically detect pytorch distribution: first candidate
                # whose check_command exits 0 wins.
                for c in _pytorch_distributions:
                    if (
                        c.windows_supported if platform.system() == "Windows" else True
                    ) and (
                        (
                            subprocess.run(
                                c.check_command,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL,
                                shell=True,
                            ).returncode
                            == 0
                        )
                    ):
                        logger.info(c.success_message)
                        if isinstance(c.install_command[0], list):
                            for cmd in c.install_command:
                                subprocess.check_call(cmd)
                        else:
                            subprocess.check_call(c.install_command)  # type: ignore
                        break

    # Install other requirements
    install_requirements("requirements/pytorch.txt")
    install_requirements("requirements/api.txt")
    install_requirements("requirements/interrogation.txt")

    if os.environ.get("DISCORD_BOT_TOKEN"):
        install_requirements("requirements/bot.txt")
def is_installed(package: str, version: Optional[str] = None):
    """
    Check if a package is installed, optionally at a matching version.

    Args:
        package: Importable module name to look up.
        version: Optional spec whose first two characters are the operator
            ("==", ">=", "<=") and whose last "="-separated token is the
            version number, e.g. ">=1.2.0".

    Returns:
        True when the module spec exists (and the version check, where
        requested and resolvable, passed); False otherwise.
    """
    try:
        spec = importlib.util.find_spec(package)
        if spec is None:
            raise NoModuleSpecFound
        if version is not None:
            try:
                from packaging import version as packaging_version

                # NOTE(review): parsing assumes a two-character operator
                # prefix; a bare "1.2.0" spec would be mis-parsed.
                version_number = version.split("=")[-1]
                version_type = version[:2]
                required_version = packaging_version.parse(version_number)
                current_version = packaging_version.parse(
                    importlib.metadata.version(package)
                )
                logger.debug(
                    f"Required version: {required_version} - Current version: {current_version} - version type: {version_type}"
                )
                # A failed comparison raises AssertionError, caught below.
                if version_type == "==":
                    assert current_version == required_version
                elif version_type == ">=":
                    assert current_version >= required_version
                elif version_type == "<=":
                    assert current_version <= required_version
            except PackageNotFoundError:
                # Importable but no pip metadata (e.g. editable install):
                # treated as satisfying the requirement.
                logger.debug(
                    f"Version metadata not found for {package}, skipping version check"
                )
        else:
            logger.debug(f"Package {package} - ok")
    except AssertionError:
        logger.debug(
            f"Package {package} - found - incorrect version - {version} - {importlib.metadata.version(package)}"
        )
        return False
    except NoModuleSpecFound:
        logger.debug(f"Package {package} - not found")
        return False
    return spec is not None
def commit_hash():
    """
    Return the repository's HEAD commit hash, or "<none>" when it cannot
    be determined (not a git checkout, git missing, ...).

    Some parts taken from A111 repo (https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/645f4e7ef8c9d59deea7091a22373b2da2b780f2/launch.py#L20)
    """
    try:
        completed = subprocess.run(
            "git rev-parse HEAD",
            shell=True,
            check=True,
            capture_output=True,
        )
    except subprocess.CalledProcessError:
        return "<none>"
    return completed.stdout.decode(encoding="utf-8").strip()
def version_check(commit: str):
    """
    Check if the local version is up to date

    Taken from: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/645f4e7ef8c9d59deea7091a22373b2da2b780f2/launch.py#L134

    Args:
        commit: Local HEAD commit hash, or "<none>" outside a git clone.
    """
    try:
        import requests

        # Resolve current branch and origin URL so we can query the
        # matching branch on the GitHub API.
        current_branch = (
            subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True)
            .decode("utf-8")
            .strip()
        )
        origin = (
            subprocess.check_output("git config --get remote.origin.url", shell=True)
            .decode("utf-8")
            .strip()
        )
        # assumes an origin URL shaped like .../<username>/<project>.git
        username = origin.split("/")[-2]
        project = origin.split("/")[-1].split(".")[0]

        commits = requests.get(
            f"https://api.github.com/repos/{username}/{project}/branches/{current_branch}",
            timeout=5,
        ).json()
        print(f"Current commit: {commit}")
        if commit not in ("<none>", commits["commit"]["sha"]):
            print("--------------------------------------------------------")
            print("| You are not up to date with the most recent release. |")
            print("| Consider running `git pull` to update. |")
            print("--------------------------------------------------------")
        elif commits["commit"]["sha"] == commit:
            print("You are up to date with the most recent release.")
        else:
            print("Not a git clone, can't perform version check.")
    except Exception as e:
        # Any failure (no git, no network, API rate limit) is non-fatal.
        logger.debug(f"Version check failed: {e}")
        logger.info(
            "No git repo found, assuming that we are in containerized environment"
        )
def check_valid_python_version():
    """Abort (RuntimeError) on Python >= 3.12; print a warning on < 3.9."""
    minor = int(platform.python_version_tuple()[1])
    if minor >= 12:
        print("Python 3.12 or later is not currently supported in voltaML!")
        print("Please consider switching to an older release to use volta!")
        raise RuntimeError("Unsupported Python version")
    if minor < 9:
        banner = (
            "--------------------------------------------------------",
            "| The python release you are currently using is older |",
            "| than our official supported version! Please consider |",
            "| updating to Python 3.11! |",
            "| |",
            "| Issues will most likely be IGNORED! |",
            "--------------------------------------------------------",
        )
        for line in banner:
            print(line)
def in_virtualenv():
    """Return True when running inside a virtual environment."""
    return sys.prefix != get_base_prefix_compat()
def create_environment():
    """
    Create a virtual environment in ./venv and tell the user how to
    activate it.  Exits with status 1 when creation fails.
    """
    # Platform-specific activation hint shown to the user.
    command = (
        "source venv/bin/activate"
        if sys.platform == "linux"
        else "venv\\Scripts\\activate.bat OR venv\\Scripts\\Activate.ps1"
    )
    if virtualenv_exists():
        logger.info(
            f"Virtual environment already exists, you just need to activate it with '{command}', then run the script again"
        )
        return
    if not in_virtualenv():
        logger.info("Creating virtual environment")
        python_executable = sys.executable
        try:
            # Requires the `virtualenv` package to be available to the
            # current interpreter.
            subprocess.run(
                f"{python_executable} -m virtualenv venv",
                shell=True,
                check=True,
            )
            logger.info(
                f"Virtual environment created, please activate it with '{command}', then run the script again"
            )
        except subprocess.CalledProcessError:
            logger.error("Failed to create virtual environment")
            sys.exit(1)
    else:
        logger.info("Already in virtual environment")
# Load the application configuration once at import time (shared module state).
config = load_config()
class WebSocketLoggingHandler(StreamHandler):
    "Broadcasts log messages to all connected clients."

    def __init__(self, config: Optional["Configuration"]):
        super().__init__()
        # Newest-first list of formatted messages awaiting broadcast.
        self.buffer = []
        self.config = config

    def emit(self, record: LogRecord):
        # Without a config object websocket logging cannot be enabled.
        if not self.config:
            return
        if self.config.api.enable_websocket_logging is False:
            return

        msg = f"{record.levelname} {self.format(record)}"
        # insert(0, ...) keeps the most recent message first in the payload.
        self.buffer.insert(0, msg)

        # Prevent buffer from growing too large
        if len(self.buffer) > 100:
            self.send()

        self.debounced_send()

    def debounced_send(self):
        # NOTE(review): no debouncing is visible here - this just forwards
        # to send(); presumably a debounce decorator was intended. Confirm.
        self.send()

    def send(self):
        # Flush the whole buffer as a single newline-joined broadcast.
        msg = "\n".join(self.buffer)
        self.buffer.clear()
        websocket_manager.broadcast_sync(Data(data={"message": msg}, data_type="log"))
class R2Bucket:
    """
    Class for interacting with R2 bucket.

    Args:
        endpoint: R2 endpoint. `R2_ENDPOINT` env var.
        bucket_name: Bucket name. `R2_BUCKET_NAME` env var.
        key: secret access id. `AWS_ACCESS_KEY_ID` env var.
        secret: secret access key. `AWS_SECRET_ACCESS_KEY` env var.
        dev_address: Development address where files can be seen publicly. `R2_DEV_ADDRESS` env var.
    """

    def __init__(
        self,
        endpoint: str,
        bucket_name: str,
        key: Optional[str] = None,
        secret: Optional[str] = None,
        dev_address: Optional[str] = None,
    ):
        self.endpoint = endpoint
        self.bucket_name = bucket_name
        self.key = key
        self.secret = secret
        # R2 speaks the S3 API, so a plain boto3 S3 client is used.
        # NOTE(review): when key/secret are None, boto3 falls back to its
        # default credential chain (env vars, config files) - confirm that
        # is the intended behavior.
        self.client = boto3.client(
            "s3",
            endpoint_url=self.endpoint,
            aws_access_key_id=self.key,
            aws_secret_access_key=self.secret,
        )
        self.dev_address = dev_address if dev_address else os.getenv("R2_DEV_ADDRESS")

    def upload_file(self, file: BytesIO, filename: str):
        """Upload file to R2 bucket.

        Returns the public dev URL when `dev_address` is configured,
        otherwise returns None implicitly.
        """
        # Objects are deliberately world-readable: they are meant to be
        # served publicly via the dev address.
        self.client.upload_fileobj(
            file, self.bucket_name, filename, ExtraArgs={"ACL": "public-read"}
        )
        if self.dev_address:
            url = f"{self.dev_address}/{quote_plus(filename)}"
            logger.info(f"Uploaded file to R2: {url}")
            return url
The provided code snippet includes necessary dependencies for implementing the `checks` function. Write a Python function `def checks()` to solve the following problem:
Check if the script is run from a virtual environment, if yes, check requirements
Here is the function:
def checks():
    """
    Check if the script is run from a virtual environment, if yes, check
    requirements.  Bootstraps pip packages, logging, version/python checks
    and optional R2 support.

    Returns:
        The parsed argparse namespace (CLI args plus EXTRA_ARGS from env).
    """
    # Root and containerized runs are allowed to skip the venv requirement.
    if not (is_root() or args.in_container):
        if not in_virtualenv():
            create_environment()
            print("Please run the script from a virtual environment")
            sys.exit(1)

    # Install more user friendly logging
    if not is_installed("rich"):
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "rich",
            ]
        )
    if not is_installed("requests"):
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "requests",
            ]
        )
    if not is_installed("packaging"):
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "packaging",
            ]
        )
    # Module name is "dotenv" but the PyPI distribution is "python-dotenv".
    if not is_installed("dotenv"):
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "python-dotenv",
            ]
        )

    # Handle dotenv file
    import dotenv

    dotenv.load_dotenv()

    # Handle arguments passed to the script: EXTRA_ARGS env entries are
    # appended to the CLI args and everything is re-parsed.
    extra_args = os.getenv("EXTRA_ARGS")
    if extra_args:
        app_args.extend(shlex.split(extra_args))
    args_with_extras = parser.parse_args(args=app_args)

    # Inject better logger
    from rich.logging import RichHandler

    print(f"Log level: {args_with_extras.log_level}")
    # CLI flag wins; otherwise LOG_LEVEL env var, defaulting to INFO.
    args_with_extras.log_level = args_with_extras.log_level or os.getenv(
        "LOG_LEVEL", "INFO"
    )
    cleanup_old_logs()
    logging.basicConfig(
        level=args_with_extras.log_level,
        format="%(asctime)s | %(name)s » %(message)s",
        datefmt="%H:%M:%S",
        handlers=[
            RichHandler(rich_tracebacks=True, show_time=False),
            logging.FileHandler(
                f"data/logs/{datetime.now().strftime('%d-%m-%Y_%H-%M-%S')}.log",
                mode="w",
                encoding="utf-8",
            ),
        ],
    )
    logger = logging.getLogger()

    # Bot mode requires a Discord token unless we are only installing deps.
    if args_with_extras.bot and not args_with_extras.install_only:
        if not os.getenv("DISCORD_BOT_TOKEN"):
            logger.error(
                "Bot start requested, but no Discord token provided. Please provide a token with DISCORD_BOT_TOKEN environment variable"
            )
            sys.exit(1)

    # Check if we are up to date with the latest release
    version_check(commit_hash())

    # Check if user is running unsupported/non-working version of Python
    try:
        check_valid_python_version()
    except RuntimeError:
        exit(0)

    # Install pytorch and api requirements
    install_deps(args_with_extras.pytorch_type if args_with_extras.pytorch_type else -1)

    if not os.getenv("HUGGINGFACE_TOKEN"):
        logger.info(
            "No HuggingFace token provided, some features will be disabled until it is provided in the .env file or in the web interface"
        )

    # Create the diffusers cache folder
    from diffusers.utils.constants import DIFFUSERS_CACHE

    Path(DIFFUSERS_CACHE).mkdir(exist_ok=True, parents=True)

    # Imported late: core.config depends on packages installed above.
    from core.config import config
    from core.logger.websocket_logging import WebSocketLoggingHandler

    logger.addHandler(WebSocketLoggingHandler(config=config))

    logger.info(f"Device: {config.api.device}")
    logger.info(f"Precision: {config.api.data_type}")

    # Initialize R2 bucket if needed
    if args_with_extras.enable_r2:
        from core import shared_dependent
        from core.extra.cloudflare_r2 import R2Bucket

        endpoint = os.environ["R2_ENDPOINT"]
        bucket_name = os.environ["R2_BUCKET_NAME"]
        shared_dependent.r2 = R2Bucket(endpoint=endpoint, bucket_name=bucket_name)
    return args_with_extras
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.