id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
17,201 | from __future__ import print_function
import base64
import os
import sys
def show_help():
    """Print usage information for imgcat and terminate the process."""
    print("Usage: imgcat filename ...")
    print(" or: cat filename | python imgcat.py -")
    # Use sys.exit() instead of the site-provided exit() helper: exit() is
    # only injected by the `site` module and is absent under `python -S`.
    sys.exit()
17,202 | from __future__ import print_function
import base64
import os
import sys
def _read_binary_stdin():
    """Read all of stdin as raw bytes, portably across Python 2/3 and Windows.

    See https://stackoverflow.com/a/38939320/474819 for other platform notes.
    """
    if sys.version_info >= (3, 0):
        # Python 3 exposes the raw byte stream directly.
        stream = sys.stdin.buffer
    else:
        # Python 2 on Windows opens sys.stdin in text mode, and binary
        # data read from it becomes corrupted on \r\n.
        if sys.platform == "win32":
            # Switch the fd to binary mode before reading.
            import msvcrt
            msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        stream = sys.stdin
    return stream.read()
17,203 | from __future__ import print_function
import heapq
import itertools
import math
from collections import deque
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `require_axis` function. Write a Python function `def require_axis(f)` to solve the following problem:
Check if the object of the function has axis and sel_axis members
Here is the function:
def require_axis(f):
    """ Check if the object of the function has axis and sel_axis members.

    Decorator for node methods: raises ValueError before calling ``f`` when
    either ``self.axis`` or ``self.sel_axis`` is None.
    """
    @wraps(f)
    def _wrapper(self, *args, **kwargs):
        if None in (self.axis, self.sel_axis):
            # Fix: the placeholder was '%(func_name)' without the trailing
            # 's' conversion, so the %-format itself raised
            # "unsupported format character" instead of this message.
            raise ValueError('%(func_name)s requires the node %(node)s '
                             'to have an axis and a sel_axis function' %
                             dict(func_name=f.__name__, node=repr(self)))
        return f(self, *args, **kwargs)
    return _wrapper
17,204 | from __future__ import print_function
import heapq
import itertools
import math
from collections import deque
from functools import wraps
def level_order(tree, include_all=False):
    """Yield the nodes of *tree* in breadth-first (level) order.

    When ``include_all`` is True, missing children are replaced with freshly
    constructed empty nodes, so every level is complete and the iterator
    becomes infinite.
    """
    pending = deque((tree,))
    while pending:
        current = pending.popleft()
        yield current
        # Queue children; under include_all, substitute dummy nodes for gaps.
        make_dummy = current.__class__
        if include_all or current.left:
            pending.append(current.left if current.left else make_dummy())
        if include_all or current.right:
            pending.append(current.right if current.right else make_dummy())
The provided code snippet includes necessary dependencies for implementing the `visualize` function. Write a Python function `def visualize(tree, max_level=100, node_width=10, left_padding=5)` to solve the following problem:
Prints the tree to stdout
Here is the function:
def visualize(tree, max_level=100, node_width=10, left_padding=5):
    """ Prints the tree to stdout """
    # Clamp drawing depth to the tree's actual height.
    height = min(max_level, tree.height() - 1)
    # Leaf-slot count of the deepest drawn level; used for centering.
    max_width = pow(2, height)
    per_level = 1  # nodes expected on the current level
    in_level = 0   # nodes printed so far on the current level
    level = 0
    # include_all=True pads missing children with dummy nodes, so each level
    # is complete; that iterator is infinite and we stop via the break below.
    for node in level_order(tree, include_all=True):
        if in_level == 0:
            # New level: two blank separator lines plus left padding.
            print()
            print()
            print(' ' * left_padding, end=' ')
        # Column width halves as the levels double in node count.
        width = int(max_width * node_width / per_level)
        node_str = (str(node.data) if node else '').center(width)
        print(node_str, end=' ')
        in_level += 1
        if in_level == per_level:
            # Level complete: the next level holds twice as many nodes.
            in_level = 0
            per_level *= 2
            level += 1
        if level > height:
            break
    print()
    print()
17,205 | import functools
from io import StringIO
from PIL import Image
from . import kdtree
def convert(filename,
            is_unicode=False,
            is_truecolor=False,
            is_256color=True,
            is_16color=False,
            is_8color=False,
            width=80,
            palette="default"):
    """Convert an image file and return the resulting ANSI-art string.

    Arguments:
    filename -- path of the input image, e.g. '/home/user/image.png'

    Keyword arguments:
    is_unicode -- use unicode characters in the output (default False: ASCII)
    is_truecolor -- use RGB colors (few terminals support this); exactly one
        color option must be selected (default False)
    is_256color -- use 256 colors: 16 system colors, 6x6x6 cube and 24 grays
        (default True)
    is_16color -- use only the 16 system colors (default False)
    is_8color -- use only the first 8 system colors (default False)
    width -- number of columns in the output
    palette -- which RGB values the system colors map to; relevant for
        8/16/256 color modes; one of ["default", "xterm", "linuxconsole",
        "solarized", "rxvt", "tango", "gruvbox", "gruvboxdark"]
    """
    # Force RGB so grayscale images still yield pixel triplets.
    image = Image.open(filename).convert('RGB')
    color_type = _get_color_type(is_truecolor=is_truecolor,
                                 is_256color=is_256color,
                                 is_16color=is_16color,
                                 is_8color=is_8color)
    rendered = _toAnsi(image,
                       oWidth=width,
                       is_unicode=is_unicode,
                       color_type=color_type,
                       palette=palette)
    return rendered
The provided code snippet includes necessary dependencies for implementing the `to_file` function. Write a Python function `def to_file(infile, outfile, is_unicode=False, is_truecolor=False, is_256color=True, is_16color=False, is_8color=False, width=80, palette="default")` to solve the following problem:
Convert an image, and output to file. Arguments: infile -- The name of the input file to load. Example: '/home/user/image.png' outfile -- The name of the output file that the string will be written into. Keyword Arguments: is_unicode -- Whether to use unicode in generating output (default False, ASCII will be used) is_truecolor -- Whether to use RGB colors in generation (few terminals support this). Exactly one color option must only be selected. Default False. is_256color -- Whether to use 256 colors (16 system colors, 6x6x6 color cube, and 24 grayscale colors) for generating the output. This is the default color setting. Please run colortest-256 for a demonstration of colors. Default True. is_16color -- Whether to use only the 16 System colors. Default False is_8color -- Whether to use only the first 8 of the System colors. Default False. width -- Number of columns the output will use palette -- Determines which RGB colors the System colors map to. This only is relevant when using 8/16/256 color modes. This may be one of ["default", "xterm", "linuxconsole", "solarized", "rxvt", "tango", "gruvbox", "gruvboxdark"]
Here is the function:
def to_file(infile,
            outfile,
            is_unicode=False,
            is_truecolor=False,
            is_256color=True,
            is_16color=False,
            is_8color=False,
            width=80,
            palette="default"):
    """Convert an image and write the ANSI string to a file.

    Arguments:
    infile -- path of the input image, e.g. '/home/user/image.png'
    outfile -- path the resulting string is written to

    Keyword arguments:
    is_unicode -- use unicode characters in the output (default False: ASCII)
    is_truecolor -- use RGB colors (few terminals support this); exactly one
        color option must be selected (default False)
    is_256color -- use 256 colors (default True)
    is_16color -- use only the 16 system colors (default False)
    is_8color -- use only the first 8 system colors (default False)
    width -- number of columns in the output
    palette -- which RGB values the system colors map to; one of ["default",
        "xterm", "linuxconsole", "solarized", "rxvt", "tango", "gruvbox",
        "gruvboxdark"]
    """
    # Convert first: previously the output file was opened (created or
    # truncated) before conversion, so a failing convert() left behind an
    # empty/clobbered outfile.
    ansi_str = convert(infile,
                       is_unicode=is_unicode,
                       is_truecolor=is_truecolor,
                       is_256color=is_256color,
                       is_16color=is_16color,
                       is_8color=is_8color,
                       width=width,
                       palette=palette)
    with open(outfile, 'w') as ofile:
        ofile.write(ansi_str)
17,206 | __credits__ = ["Micah Elliott", "Kevin Lange", "Takumi Sueda", "Torry Crass"]
__license__ = "WTFPL http://sam.zoy.org/wtfpl/"
__version__ = "0.2"
__maintainer__ = "Torry Crass"
__email__ = "tc.github@outlook.com"
__status__ = "Development"
import sys
import os.path
def print_help():
    """Print the program banner, credits, requirements and usage."""
    bar = 75 * "="
    rule = 75 * "-"
    print("")
    print(bar)
    print(30 * "=", "IMAGE to ANSI", 30 * "=")
    print(bar)
    print("\nCredits: ", __credits__[0], "\n\t", __credits__[1], "\n\t",
          __credits__[2], "\n\t", __credits__[3])
    print("License: ", __license__)
    print("Version: ", __version__)
    print("Maintainer: ", __maintainer__, " ", __email__)
    print("Status: ", __status__)
    description = (
        "\nThis program allows you to convert a graphic file (preferably png) into\n"
        "ANSI code that will display a rendition of the image in a console that\n"
        "supports ANSI color codes.\n"
    )
    print(description)
    requirements = (
        "You need to have python, python-image, python-pillow. You can either\n"
        "install these with your package manager (apt/yum) or install python-pip\n"
        "and install the necessary modules from there.\n"
    )
    print(requirements)
    print("See the github repository for more information (if available).\n")
    print("\n### USAGE ###\n")
    print("Standard:\n"
          "\tpython image-to-ansi.py <inputfile>\n")
    print(rule)
    print("Source: https://github.com/torrycrass/image-to-ansi")
    print(rule)
    print("")
17,207 | import sys
import os.path
def _create_incs_lut():
incs = [(0x00, 0x5f), (0x5f, 0x87), (0x87, 0xaf), (0xaf, 0xd7),
(0xd7, 0xff)]
res = []
for part in range(256):
for s, b in incs:
if s <= part <= b:
if abs(s - part) < abs(b - part):
res.append('%02x' % s)
else:
res.append('%02x' % b)
break
return res | null |
17,208 | import sys
import os.path
# NOTE(review): CLUT is not defined in this chunk -- presumably a sequence of
# (hex-rgb-string, short-code) pairs defined elsewhere; confirm.
RGB2SHORT_DICT = dict(CLUT)
def lut(part):
    """Map one 0-255 channel value to a color-cube hex component.

    NOTE(review): the body of this function appears to have been lost in
    extraction -- as written it returns None; restore from the original file.
    """
def rgb2short_fast(r, g, b):
    # Snap each channel through lut() and join the hex components to form the
    # key used for the 256-color short-code lookup.
    return RGB2SHORT_DICT['%s%s%s' % (lut(r), lut(g), lut(b))]
17,209 | import importlib
import importlib.util
import logging
import numpy as np
import os
import random
import sys
from datetime import datetime
import torch
The provided code snippet includes necessary dependencies for implementing the `seed_all_rng` function. Write a Python function `def seed_all_rng(seed=None)` to solve the following problem:
Set the random seed for the RNG in torch, numpy and python. Args: seed (int): if None, will use a strong random seed.
Here is the function:
def seed_all_rng(seed=None):
    """
    Set the random seed for the RNG in torch, numpy and python.

    Args:
        seed (int): if None, a strong random seed is derived from the pid,
            the current sub-second time and os.urandom, so workers started
            at the same instant still diverge.
    """
    if seed is None:
        entropy = int.from_bytes(os.urandom(2), "big")
        timestamp = int(datetime.now().strftime("%S%f"))
        seed = os.getpid() + timestamp + entropy
        logging.getLogger(__name__).info(
            "Using a generated random seed {}".format(seed))
    # Seed every RNG the process may use.
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    # Recorded for reproducibility; note PYTHONHASHSEED only affects
    # interpreters launched after this point, not the current one.
    os.environ["PYTHONHASHSEED"] = str(seed)
17,210 | import importlib
import importlib.util
import logging
import numpy as np
import os
import random
import sys
from datetime import datetime
import torch
The provided code snippet includes necessary dependencies for implementing the `_configure_libraries` function. Write a Python function `def _configure_libraries()` to solve the following problem:
Configurations for some libraries.
Here is the function:
def _configure_libraries():
    """
    Configurations for some libraries.

    Must run before anything imports cv2: it either blocks the import
    entirely (SFAST_DISABLE_CV2) or disables OpenCL inside OpenCV.
    """
    # An environment option to disable `import cv2` globally,
    # in case it leads to negative performance impact
    disable_cv2 = int(os.environ.get("SFAST_DISABLE_CV2", False))
    if disable_cv2:
        # Poison the module cache so any later `import cv2` fails fast.
        sys.modules["cv2"] = None
    else:
        # Disable opencl in opencv since its interaction with cuda often has negative effects
        # This envvar is supported after OpenCV 3.4.0
        os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
        try:
            import cv2
            if int(cv2.__version__.split(".")[0]) >= 3:
                cv2.ocl.setUseOpenCL(False)
        except ModuleNotFoundError:
            # Other types of ImportError, if happened, should not be ignored.
            # Because a failed opencv import could mess up address space
            # https://github.com/skvark/opencv-python/issues/381
            pass

    def get_version(module, digit=2):
        # Parse the leading `digit` numeric components of a version string.
        return tuple(map(int, module.__version__.split(".")[:digit]))
    # fmt: off
    # assert get_version(torch) >= (1, 4), "Requires torch>=1.4"
    # import fvcore
    # assert get_version(fvcore, 3) >= (0, 1, 2), "Requires fvcore>=0.1.2"
    # import yaml
    # assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1"
    # fmt: on
17,211 | import importlib
import importlib.util
import logging
import numpy as np
import os
import random
import sys
from datetime import datetime
import torch
DOC_BUILDING = os.getenv("_DOC_BUILDING", False)
The provided code snippet includes necessary dependencies for implementing the `fixup_module_metadata` function. Write a Python function `def fixup_module_metadata(module_name, namespace, keys=None)` to solve the following problem:
Fix the __qualname__ of module members to be their exported api name, so when they are referenced in docs, sphinx can find them. Reference: https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241
Here is the function:
def fixup_module_metadata(module_name, namespace, keys=None):
    """
    Fix the __qualname__ of module members to be their exported api name, so
    when they are referenced in docs, sphinx can find them. Reference:
    https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241

    Args:
        module_name (str): the public module name stamped onto members.
        namespace (dict): typically the module's ``vars()``.
        keys: iterable of names to fix; defaults to all public names
            in ``namespace``.
    """
    # Only rewrite metadata while building documentation.
    if not DOC_BUILDING:
        return
    seen_ids = set()

    def fix_one(qualname, name, obj):
        # avoid infinite recursion (relevant when using
        # typing.Generic, for example)
        if id(obj) in seen_ids:
            return
        seen_ids.add(id(obj))
        mod = getattr(obj, "__module__", None)
        if mod is not None and mod.startswith(module_name):
            obj.__module__ = module_name
            # Modules, unlike everything else in Python, put fully-qualitied
            # names into their __name__ attribute. We check for "." to avoid
            # rewriting these.
            if hasattr(obj, "__name__") and "." not in obj.__name__:
                obj.__name__ = name
                obj.__qualname__ = qualname
            if isinstance(obj, type):
                for attr_name, attr_value in obj.__dict__.items():
                    # Fix: recurse with the member's own qualified name.
                    # The original closed over the loop variable `objname`,
                    # which is only correct at the first nesting level and
                    # produced wrong qualnames for attributes of nested
                    # classes (e.g. "A.C" instead of "A.B.C").
                    fix_one(qualname + "." + attr_name, attr_name, attr_value)

    if keys is None:
        keys = namespace.keys()
    for objname in keys:
        if not objname.startswith("_"):
            obj = namespace[objname]
            fix_one(objname, objname, obj)
17,212 | import itertools
import functools
import dataclasses
import torch
def unflatten_tensors(tensors, start=0):
    """Decode the object encoded at ``tensors[start]``.

    The tensor at ``start`` holds an integer type tag; the payload that
    follows is handed to the matching ``unflatten_*`` helper.
    NOTE(review): the helpers' exact return contract is defined elsewhere
    in this module.
    """
    tag = tensors[start].item()
    payload = start + 1
    # Tags whose decoder takes only (tensors, position).
    plain_decoders = {
        0: unflatten_none,
        1: unflatten_tensor,
        2: unflatten_bool,
        3: unflatten_float,
        4: unflatten_int,
        5: unflatten_str,
        6: unflatten_bytes,
        9: unflatten_dataclass,
        10: unflatten_dict,
        11: unflatten_unknown,
    }
    if tag in plain_decoders:
        return plain_decoders[tag](tensors, payload)
    # Sequence tags carry the concrete container type as an extra argument.
    if tag == 7:
        return unflatten_list_or_tuple(tensors, payload, list)
    if tag == 8:
        return unflatten_list_or_tuple(tensors, payload, tuple)
    raise ValueError("Unknown type number: {}".format(tag))
def unflattern(tensors):
    """Rebuild the original object from a flat tensor sequence.

    (Name kept as-is, misspelling included: it is the public entry point.)
    """
    # for sfast._C._jit_pass_erase_scalar_tensors
    # Box any scalar back into a 1-element tensor before decoding.
    boxed = tuple(
        item if isinstance(item, torch.Tensor) else torch.tensor([item])
        for item in tensors)
    return unflatten_tensors(boxed)[0]
17,213 | import contextlib
import torch
@contextlib.contextmanager
def compute_precision(*, allow_tf32):
    """Context manager temporarily setting TF32 use for matmul and cuDNN.

    Args:
        allow_tf32 (bool): whether TF32 is allowed while the context is
            active.

    The previous matmul setting is restored on exit; cuDNN flags are
    restored by ``torch.backends.cudnn.flags`` itself.

    Fix: the function yields but was missing the
    ``@contextlib.contextmanager`` decorator, so ``with
    compute_precision(...)`` raised (a bare generator has no __enter__).
    """
    old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
    try:
        torch.backends.cuda.matmul.allow_tf32 = allow_tf32
        # None for the other cudnn flags means "leave unchanged".
        with torch.backends.cudnn.flags(enabled=None,
                                        benchmark=None,
                                        deterministic=None,
                                        allow_tf32=allow_tf32):
            yield
    finally:
        torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
@contextlib.contextmanager
def low_compute_precision():
    """Context manager enabling TF32 (lower precision, faster GEMMs).

    Fix: added the missing ``@contextlib.contextmanager`` decorator — the
    function yields and is used as a ``with``-context. The redundant
    try/finally-pass wrapper was removed; compute_precision already
    restores state.
    """
    with compute_precision(allow_tf32=True):
        yield
17,214 | import contextlib
import torch
@contextlib.contextmanager
def compute_precision(*, allow_tf32):
    """Temporarily set whether matmul and cuDNN may use TF32.

    Args:
        allow_tf32 (bool): TF32 allowance while the context is active.

    Restores the previous matmul setting on exit; cuDNN flags are restored
    by ``torch.backends.cudnn.flags``.

    Fix: added the missing ``@contextlib.contextmanager`` decorator —
    without it ``with compute_precision(...)`` fails because a plain
    generator is not a context manager.
    """
    old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
    try:
        torch.backends.cuda.matmul.allow_tf32 = allow_tf32
        # None leaves the remaining cudnn flags unchanged.
        with torch.backends.cudnn.flags(enabled=None,
                                        benchmark=None,
                                        deterministic=None,
                                        allow_tf32=allow_tf32):
            yield
    finally:
        torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
@contextlib.contextmanager
def high_compute_precision():
    """Context manager disabling TF32 (full fp32 precision).

    Fix: added the missing ``@contextlib.contextmanager`` decorator — the
    function yields and is used as a ``with``-context. The redundant
    try/finally-pass wrapper was removed; compute_precision already
    restores state.
    """
    with compute_precision(allow_tf32=False):
        yield
17,215 | MODEL = 'runwayml/stable-diffusion-v1-5'
VARIANT = None
CUSTOM_PIPELINE = None
SCHEDULER = 'LCMScheduler'
LORA = 'latent-consistency/lcm-lora-sdv1-5'
CONTROLNET = None
STEPS = 4
PROMPT = 'best quality, realistic, unreal engine, 4K, a beautiful girl'
NEGATIVE_PROMPT = None
SEED = None
WARMUPS = 3
BATCH = 1
HEIGHT = None
WIDTH = None
INPUT_IMAGE = None
CONTROL_IMAGE = None
OUTPUT_IMAGE = None
EXTRA_CALL_KWARGS = '{"guidance_scale": 0.0}'
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def parse_args():
    """Parse the benchmark's command-line flags.

    Defaults come from the module-level constants (MODEL, STEPS, ...).
    """
    parser = argparse.ArgumentParser()
    # Declarative spec keeps every flag and its options in one place.
    for flag, options in (
            ('--model', dict(type=str, default=MODEL)),
            ('--variant', dict(type=str, default=VARIANT)),
            ('--custom-pipeline', dict(type=str, default=CUSTOM_PIPELINE)),
            ('--scheduler', dict(type=str, default=SCHEDULER)),
            ('--lora', dict(type=str, default=LORA)),
            ('--controlnet', dict(type=str, default=CONTROLNET)),
            ('--steps', dict(type=int, default=STEPS)),
            ('--prompt', dict(type=str, default=PROMPT)),
            ('--negative-prompt', dict(type=str, default=NEGATIVE_PROMPT)),
            ('--seed', dict(type=int, default=SEED)),
            ('--warmups', dict(type=int, default=WARMUPS)),
            ('--batch', dict(type=int, default=BATCH)),
            ('--height', dict(type=int, default=HEIGHT)),
            ('--width', dict(type=int, default=WIDTH)),
            ('--extra-call-kwargs', dict(type=str,
                                         default=EXTRA_CALL_KWARGS)),
            ('--input-image', dict(type=str, default=INPUT_IMAGE)),
            ('--control-image', dict(type=str, default=CONTROL_IMAGE)),
            ('--output-image', dict(type=str, default=OUTPUT_IMAGE)),
            ('--compiler',
             dict(type=str,
                  default='sfast',
                  choices=['none', 'sfast', 'compile',
                           'compile-max-autotune'])),
            ('--quantize', dict(action='store_true')),
            ('--no-fusion', dict(action='store_true')),
    ):
        parser.add_argument(flag, **options)
    return parser.parse_args()
17,216 | import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def load_model(pipeline_cls,
               model,
               variant=None,
               custom_pipeline=None,
               scheduler=None,
               lora=None,
               controlnet=None):
    """Load a diffusers pipeline in fp16 on CUDA.

    Optionally wires in a custom pipeline, a scheduler by class name,
    fused LoRA weights and a ControlNet; disables the safety checker.
    """
    loading_kwargs = {}
    if custom_pipeline is not None:
        loading_kwargs['custom_pipeline'] = custom_pipeline
    if variant is not None:
        loading_kwargs['variant'] = variant
    if controlnet is not None:
        from diffusers import ControlNetModel
        loading_kwargs['controlnet'] = ControlNetModel.from_pretrained(
            controlnet, torch_dtype=torch.float16)
    pipeline = pipeline_cls.from_pretrained(model,
                                            torch_dtype=torch.float16,
                                            **loading_kwargs)
    if scheduler is not None:
        # Resolve the scheduler class by name from the diffusers package.
        scheduler_cls = getattr(importlib.import_module('diffusers'),
                                scheduler)
        pipeline.scheduler = scheduler_cls.from_config(
            pipeline.scheduler.config)
    if lora is not None:
        pipeline.load_lora_weights(lora)
        pipeline.fuse_lora()
    pipeline.safety_checker = None
    pipeline.to(torch.device('cuda'))
    return pipeline
17,217 | import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
class CompilationConfig:
    # Namespace holding the default stable-fast compilation settings.
    # NOTE(review): these look like dataclass fields, but no @dataclass
    # decorator is visible in this chunk — confirm against the full file.
    class Default:
        '''
        Default compilation config
        memory_format:
            channels_last if tensor core is available, otherwise contiguous_format.
            On GPUs with tensor core, channels_last is faster
        enable_jit:
            Whether to enable JIT, most optimizations are done with JIT
        enable_jit_freeze:
            Whether to freeze the model after JIT tracing.
            Freezing the model will enable us to optimize the model further.
        preserve_parameters:
            Whether to preserve parameters when freezing the model.
            If True, parameters will be preserved, but the model will be a bit slower.
            If False, parameters will be marked as constants, and the model will be faster.
            However, if parameters are not preserved, LoRA cannot be switched dynamically.
        enable_cnn_optimization:
            Whether to enable CNN optimization by fusion.
        enable_fused_linear_geglu:
            Whether to enable fused Linear-GEGLU kernel.
            It uses fp16 for accumulation, so could cause **quality degradation**.
        prefer_lowp_gemm:
            Whether to prefer low-precision GEMM and a series of fusion optimizations.
            This will make the model faster, but may cause numerical issues.
            These use fp16 for accumulation, so could cause **quality degradation**.
        enable_xformers:
            Whether to enable xformers and hijack it to make it compatible with JIT tracing.
        enable_cuda_graph:
            Whether to enable CUDA graph. CUDA Graph will significantly speed up the model,
            by reducing the overhead of CUDA kernel launch, memory allocation, etc.
            However, it will also increase the memory usage.
            Our implementation of CUDA graph supports dynamic shape by caching graphs of
            different shapes.
        enable_triton:
            Whether to enable Triton generated CUDA kernels.
            Triton generated CUDA kernels are faster than PyTorch's CUDA kernels.
            However, Triton has a lot of bugs, and can increase the CPU overhead,
            though the overhead can be reduced by enabling CUDA graph.
        trace_scheduler:
            Whether to trace the scheduler.
        '''
        # Picked at import time from the detected GPU capabilities
        # (gpu_device is defined elsewhere in this module).
        memory_format: torch.memory_format = (
            torch.channels_last if gpu_device.device_has_tensor_core() else
            torch.contiguous_format)
        enable_jit: bool = True
        enable_jit_freeze: bool = True
        preserve_parameters: bool = True
        enable_cnn_optimization: bool = gpu_device.device_has_tensor_core()
        # Fused Linear-GEGLU needs compute capability >= 8.0 (Ampere).
        enable_fused_linear_geglu: bool = gpu_device.device_has_capability(
            8, 0)
        prefer_lowp_gemm: bool = True
        enable_xformers: bool = False
        enable_cuda_graph: bool = False
        enable_triton: bool = False
        trace_scheduler: bool = False
def compile(m, config):
    """Compile a diffusers pipeline in place and return it.

    Compiles the unet/controlnet/vae, optionally JIT-traces the text/image
    encoders and scheduler, and wraps traced callables with CUDA graphs.
    (The name intentionally shadows the builtin: it is this module's
    public API.)
    """
    # attribute `device` is not generally available
    device = m.device if hasattr(m, 'device') else torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    # CUDA graphs only make sense when the pipeline actually runs on CUDA.
    enable_cuda_graph = config.enable_cuda_graph and device.type == 'cuda'
    m.unet = compile_unet(m.unet, config)
    if hasattr(m, 'controlnet'):
        m.controlnet = compile_unet(m.controlnet, config)
    m.vae = compile_vae(m.vae, config)
    if config.enable_jit:
        lazy_trace_ = _build_lazy_trace(config)
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = lazy_trace_(m.text_encoder.forward)
        # for SDXL
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = lazy_trace_(m.text_encoder_2.forward)
        # for SVD
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = lazy_trace_(m.image_encoder.forward)
        if config.trace_scheduler:
            m.scheduler.scale_model_input = lazy_trace_(
                m.scheduler.scale_model_input)
            m.scheduler.step = lazy_trace_(m.scheduler.step)
        # Graph-capture the traced encoders; done only under enable_jit
        # because the wrappers expect the traced forwards from above.
        if enable_cuda_graph:
            if getattr(m, 'text_encoder', None) is not None:
                m.text_encoder.forward = make_dynamic_graphed_callable(
                    m.text_encoder.forward)
            if getattr(m, 'text_encoder_2', None) is not None:
                m.text_encoder_2.forward = make_dynamic_graphed_callable(
                    m.text_encoder_2.forward)
            if getattr(m, 'image_encoder', None) is not None:
                m.image_encoder.forward = make_dynamic_graphed_callable(
                    m.image_encoder.forward)
    if hasattr(m, 'image_processor'):
        # (sic) `patch_image_prcessor` is the upstream helper's spelling.
        from sfast.libs.diffusers.image_processor import patch_image_prcessor
        patch_image_prcessor(m.image_processor)
    return m
def compile_model(model):
    """Compile a diffusers pipeline with stable-fast using sensible defaults.

    Enables xformers and Triton when installed, plus CUDA graphs, then
    delegates to compile().
    """
    config = CompilationConfig.Default()
    # xformers and Triton are suggested for achieving best performance.
    # It might be slow for Triton to generate, compile and fine-tune kernels.
    try:
        import xformers  # noqa: F401
    except ImportError:
        print('xformers not installed, skip')
    else:
        config.enable_xformers = True
    # NOTE:
    # When GPU VRAM is insufficient or the architecture is too old, Triton
    # might be slow. Disable Triton if you encounter this problem.
    try:
        import triton  # noqa: F401
    except ImportError:
        print('Triton not installed, skip')
    else:
        config.enable_triton = True
    # NOTE:
    # CUDA Graph is suggested for small batch sizes and small resolutions
    # to reduce CPU overhead. The implementation handles dynamic shape with
    # increased GPU memory need; with insufficient VRAM or high resolutions
    # (especially on Windows/WSL with "shared VRAM") it can slow things
    # down — disable it in that case.
    config.enable_cuda_graph = True
    return compile(model, config)
17,218 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.training_utils import compute_snr
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
    """Write a LoRA model-card README.md (plus example images) to repo_folder.

    Args:
        repo_id: hub repo id used in the card title.
        images: optional list of PIL images; each is saved next to the card
            and referenced from it. None/empty is handled gracefully.
        base_model: base checkpoint name written into the card metadata.
        dataset_name: training dataset name written into the card.
        repo_folder: destination directory.

    NOTE(review): the odd ``=str`` defaults for base_model/dataset_name are
    kept for interface compatibility — callers are expected to always pass
    them.
    """
    img_str = ""
    # Fix 1: guard against the default images=None, which previously crashed
    # in enumerate(). Fix 2: restore the markdown image reference — the loop
    # appended a bare "\n", so saved images were never linked from the card.
    for i, image in enumerate(images or []):
        image.save(os.path.join(repo_folder, f"image_{i}.png"))
        img_str += f"![img_{i}](./image_{i}.png)\n"
    yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
    model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
{img_str}
"""
    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)
17,219 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.training_utils import compute_snr
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
def parse_args():
    """Build and validate the CLI arguments for LoRA text-to-image training.

    Returns:
        argparse.Namespace: Parsed arguments. ``local_rank`` is overridden by
        the ``LOCAL_RANK`` environment variable when the two disagree.

    Raises:
        ValueError: If neither ``--dataset_name`` nor ``--train_data_dir``
        is provided.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference."
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=4,
        help="Number of images that should be generated during validation with `validation_prompt`.",
    )
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=1,
        help=(
            "Run fine-tuning validation every X epochs. The validation process consists of running the prompt"
            " `args.validation_prompt` multiple times: `args.num_validation_images`."
        ),
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned-lora",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument(
        "--random_flip",
        action="store_true",
        help="whether to randomly flip images horizontally",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--prediction_type",
        type=str,
        default=None,
        # Help typo fixed: "prediciton_type" -> "prediction_type".
        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--rank",
        type=int,
        default=4,
        help=("The dimension of the LoRA update matrices."),
    )
    parser.add_argument(
        "--sfast",
        action="store_true",
        help=("Whether or not to use `stable-fast` to optimize the training."),
    )
    parser.add_argument(
        "--compile",
        action="store_true",
        help=("Whether or not to torch.compile the model for faster training (conflicts with sfast)."),
    )
    args = parser.parse_args()
    # Torch distributed launchers communicate the rank via LOCAL_RANK; let the
    # environment win over the CLI flag when they disagree.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    # Sanity checks
    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Need either a dataset name or a training folder.")
    return args
# Benchmark defaults for the stable-fast text-to-image example.
# Each constant is the default value of the CLI flag with the same name
# (see parse_args below).
MODEL = 'runwayml/stable-diffusion-v1-5'
VARIANT = None
CUSTOM_PIPELINE = None
SCHEDULER = 'EulerAncestralDiscreteScheduler'
LORA = None
CONTROLNET = None
STEPS = 30
PROMPT = 'best quality, realistic, unreal engine, 4K, a beautiful girl'
NEGATIVE_PROMPT = None
SEED = None
WARMUPS = 3
BATCH = 1
HEIGHT = None
WIDTH = None
INPUT_IMAGE = None
CONTROL_IMAGE = None
OUTPUT_IMAGE = None
EXTRA_CALL_KWARGS = None
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def parse_args():
    """Parse the benchmark's command-line options.

    Defaults come from the module-level constants of the same names, so the
    script can be reconfigured either by editing the constants or via CLI.
    """
    option_specs = [
        ('--model', dict(type=str, default=MODEL)),
        ('--variant', dict(type=str, default=VARIANT)),
        ('--custom-pipeline', dict(type=str, default=CUSTOM_PIPELINE)),
        ('--scheduler', dict(type=str, default=SCHEDULER)),
        ('--lora', dict(type=str, default=LORA)),
        ('--controlnet', dict(type=str, default=CONTROLNET)),
        ('--steps', dict(type=int, default=STEPS)),
        ('--prompt', dict(type=str, default=PROMPT)),
        ('--negative-prompt', dict(type=str, default=NEGATIVE_PROMPT)),
        ('--seed', dict(type=int, default=SEED)),
        ('--warmups', dict(type=int, default=WARMUPS)),
        ('--batch', dict(type=int, default=BATCH)),
        ('--height', dict(type=int, default=HEIGHT)),
        ('--width', dict(type=int, default=WIDTH)),
        ('--extra-call-kwargs', dict(type=str, default=EXTRA_CALL_KWARGS)),
        ('--input-image', dict(type=str, default=INPUT_IMAGE)),
        ('--control-image', dict(type=str, default=CONTROL_IMAGE)),
        ('--output-image', dict(type=str, default=OUTPUT_IMAGE)),
        ('--compiler',
         dict(type=str,
              default='sfast',
              choices=['none', 'sfast', 'compile', 'compile-max-autotune'])),
        ('--quantize', dict(action='store_true')),
        ('--no-fusion', dict(action='store_true')),
    ]
    cli = argparse.ArgumentParser()
    for flag, options in option_specs:
        cli.add_argument(flag, **options)
    return cli.parse_args()
# Benchmark defaults for the Latent Consistency Model (LCM) example.
# Each constant is the default value of the CLI flag with the same name.
MODEL = 'SimianLuo/LCM_Dreamshaper_v7'
VARIANT = None
CUSTOM_PIPELINE = 'latent_consistency_txt2img'
SCHEDULER = 'EulerAncestralDiscreteScheduler'
LORA = None
CONTROLNET = None
STEPS = 4
PROMPT = 'best quality, realistic, unreal engine, 4K, a beautiful girl'
NEGATIVE_PROMPT = None
SEED = None
WARMUPS = 3
BATCH = 1
HEIGHT = 768
WIDTH = 768
INPUT_IMAGE = None
CONTROL_IMAGE = None
OUTPUT_IMAGE = None
EXTRA_CALL_KWARGS = None
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def parse_args():
    """Parse the LCM benchmark's command-line options.

    Defaults come from the module-level constants of the same names.
    """
    option_specs = [
        ('--model', dict(type=str, default=MODEL)),
        ('--variant', dict(type=str, default=VARIANT)),
        ('--custom-pipeline', dict(type=str, default=CUSTOM_PIPELINE)),
        ('--scheduler', dict(type=str, default=SCHEDULER)),
        ('--lora', dict(type=str, default=LORA)),
        ('--controlnet', dict(type=str, default=CONTROLNET)),
        ('--steps', dict(type=int, default=STEPS)),
        ('--prompt', dict(type=str, default=PROMPT)),
        ('--negative-prompt', dict(type=str, default=NEGATIVE_PROMPT)),
        ('--seed', dict(type=int, default=SEED)),
        ('--warmups', dict(type=int, default=WARMUPS)),
        ('--batch', dict(type=int, default=BATCH)),
        ('--height', dict(type=int, default=HEIGHT)),
        ('--width', dict(type=int, default=WIDTH)),
        ('--extra-call-kwargs', dict(type=str, default=EXTRA_CALL_KWARGS)),
        ('--input-image', dict(type=str, default=INPUT_IMAGE)),
        ('--control-image', dict(type=str, default=CONTROL_IMAGE)),
        ('--output-image', dict(type=str, default=OUTPUT_IMAGE)),
        ('--compiler',
         dict(type=str,
              default='sfast',
              choices=['none', 'sfast', 'compile', 'compile-max-autotune'])),
        ('--quantize', dict(action='store_true')),
        ('--no-fusion', dict(action='store_true')),
    ]
    cli = argparse.ArgumentParser()
    for flag, options in option_specs:
        cli.add_argument(flag, **options)
    return cli.parse_args()
# Benchmark defaults for the InstantID example.
# Each constant is the default value of the CLI flag with the same name.
REPO = None
FACE_ANALYSIS_ROOT = None
MODEL = 'wangqixun/YamerMIX_v8'
VARIANT = None
CUSTOM_PIPELINE = None
SCHEDULER = 'EulerAncestralDiscreteScheduler'
LORA = None
CONTROLNET = 'InstantX/InstantID'
STEPS = 30
PROMPT = 'film noir style, ink sketch|vector, male man, highly detailed, sharp focus, ultra sharpness, monochrome, high contrast, dramatic shadows, 1940s style, mysterious, cinematic'
NEGATIVE_PROMPT = 'ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, vibrant, colorful'
SEED = None
WARMUPS = 3
BATCH = 1
HEIGHT = None
WIDTH = None
INPUT_IMAGE = 'https://github.com/InstantID/InstantID/blob/main/examples/musk_resize.jpeg?raw=true'
CONTROL_IMAGE = None
OUTPUT_IMAGE = None
# JSON-encoded extra kwargs forwarded to the pipeline call.
EXTRA_CALL_KWARGS = '''{
    "controlnet_conditioning_scale": 0.8,
    "ip_adapter_scale": 0.8
}'''
import sys
import os
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
import numpy as np
import cv2
from huggingface_hub import snapshot_download
from diffusers.utils import load_image
from insightface.app import FaceAnalysis
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def parse_args():
    """Parse the InstantID benchmark's command-line options.

    Defaults come from the module-level constants of the same names.
    """
    option_specs = [
        ('--repo', dict(type=str, default=REPO)),
        ('--face-analysis-root', dict(type=str, default=FACE_ANALYSIS_ROOT)),
        ('--model', dict(type=str, default=MODEL)),
        ('--variant', dict(type=str, default=VARIANT)),
        ('--custom-pipeline', dict(type=str, default=CUSTOM_PIPELINE)),
        ('--scheduler', dict(type=str, default=SCHEDULER)),
        ('--lora', dict(type=str, default=LORA)),
        ('--controlnet', dict(type=str, default=CONTROLNET)),
        ('--steps', dict(type=int, default=STEPS)),
        ('--prompt', dict(type=str, default=PROMPT)),
        ('--negative-prompt', dict(type=str, default=NEGATIVE_PROMPT)),
        ('--seed', dict(type=int, default=SEED)),
        ('--warmups', dict(type=int, default=WARMUPS)),
        ('--batch', dict(type=int, default=BATCH)),
        ('--height', dict(type=int, default=HEIGHT)),
        ('--width', dict(type=int, default=WIDTH)),
        ('--extra-call-kwargs', dict(type=str, default=EXTRA_CALL_KWARGS)),
        ('--input-image', dict(type=str, default=INPUT_IMAGE)),
        ('--control-image', dict(type=str, default=CONTROL_IMAGE)),
        ('--output-image', dict(type=str, default=OUTPUT_IMAGE)),
        ('--compiler',
         dict(type=str,
              default='sfast',
              choices=['none', 'sfast', 'compile', 'compile-max-autotune'])),
        ('--quantize', dict(action='store_true')),
        ('--no-fusion', dict(action='store_true')),
    ]
    cli = argparse.ArgumentParser()
    for flag, options in option_specs:
        cli.add_argument(flag, **options)
    return cli.parse_args()
import sys
import os
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
import numpy as np
import cv2
from huggingface_hub import snapshot_download
from diffusers.utils import load_image
from insightface.app import FaceAnalysis
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def load_model(pipeline_cls,
               model,
               variant=None,
               custom_pipeline=None,
               scheduler=None,
               lora=None,
               controlnet=None,
               device='cuda'):
    """Instantiate an fp16 diffusers pipeline and prepare it for inference.

    Args:
        pipeline_cls: Pipeline class providing ``from_pretrained``.
        model: Model identifier or local path.
        variant: Optional weight variant (e.g. ``'fp16'``).
        custom_pipeline: Optional diffusers custom pipeline name.
        scheduler: Optional scheduler class name, looked up in ``diffusers``.
        lora: Optional LoRA weight path; loaded and fused into the weights.
        controlnet: Optional ControlNet model identifier.
        device: Target device string; defaults to ``'cuda'`` (the previously
            hard-coded behavior).

    Returns:
        The loaded pipeline, moved to ``device`` with the safety checker
        disabled.
    """
    extra_kwargs = {}
    if custom_pipeline is not None:
        extra_kwargs['custom_pipeline'] = custom_pipeline
    if variant is not None:
        extra_kwargs['variant'] = variant
    if controlnet is not None:
        # Imported lazily so the script also works when no ControlNet is used.
        from diffusers import ControlNetModel
        controlnet = ControlNetModel.from_pretrained(controlnet,
                                                     torch_dtype=torch.float16)
        extra_kwargs['controlnet'] = controlnet
    model = pipeline_cls.from_pretrained(model,
                                         torch_dtype=torch.float16,
                                         **extra_kwargs)
    if scheduler is not None:
        # Resolve the scheduler class by name from the diffusers package.
        scheduler_cls = getattr(importlib.import_module('diffusers'),
                                scheduler)
        model.scheduler = scheduler_cls.from_config(model.scheduler.config)
    if lora is not None:
        model.load_lora_weights(lora)
        model.fuse_lora()
    # Benchmarks skip the safety checker to avoid its overhead.
    model.safety_checker = None
    model.to(torch.device(device))
    return model
import sys
import os
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
import numpy as np
import cv2
from huggingface_hub import snapshot_download
from diffusers.utils import load_image
from insightface.app import FaceAnalysis
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
class CompilationConfig:
    # NOTE(review): this is an excerpt of sfast's compiler config. `gpu_device`
    # is not defined in this snippet and must be provided by the enclosing
    # module — confirm before reusing this class in isolation.
    class Default:
        '''
        Default compilation config
        memory_format:
            channels_last if tensor core is available, otherwise contiguous_format.
            On GPUs with tensor core, channels_last is faster
        enable_jit:
            Whether to enable JIT, most optimizations are done with JIT
        enable_jit_freeze:
            Whether to freeze the model after JIT tracing.
            Freezing the model will enable us to optimize the model further.
        preserve_parameters:
            Whether to preserve parameters when freezing the model.
            If True, parameters will be preserved, but the model will be a bit slower.
            If False, parameters will be marked as constants, and the model will be faster.
            However, if parameters are not preserved, LoRA cannot be switched dynamically.
        enable_cnn_optimization:
            Whether to enable CNN optimization by fusion.
        enable_fused_linear_geglu:
            Whether to enable fused Linear-GEGLU kernel.
            It uses fp16 for accumulation, so could cause **quality degradation**.
        prefer_lowp_gemm:
            Whether to prefer low-precision GEMM and a series of fusion optimizations.
            This will make the model faster, but may cause numerical issues.
            These use fp16 for accumulation, so could cause **quality degradation**.
        enable_xformers:
            Whether to enable xformers and hijack it to make it compatible with JIT tracing.
        enable_cuda_graph:
            Whether to enable CUDA graph. CUDA Graph will significantly speed up the model,
            by reducing the overhead of CUDA kernel launch, memory allocation, etc.
            However, it will also increase the memory usage.
            Our implementation of CUDA graph supports dynamic shape by caching graphs of
            different shapes.
        enable_triton:
            Whether to enable Triton generated CUDA kernels.
            Triton generated CUDA kernels are faster than PyTorch's CUDA kernels.
            However, Triton has a lot of bugs, and can increase the CPU overhead,
            though the overhead can be reduced by enabling CUDA graph.
        trace_scheduler:
            Whether to trace the scheduler.
        '''
        # Defaults are evaluated at class-creation time via `gpu_device`
        # hardware queries (tensor cores, compute capability).
        memory_format: torch.memory_format = (
            torch.channels_last if gpu_device.device_has_tensor_core() else
            torch.contiguous_format)
        enable_jit: bool = True
        enable_jit_freeze: bool = True
        preserve_parameters: bool = True
        enable_cnn_optimization: bool = gpu_device.device_has_tensor_core()
        # Fused Linear-GEGLU requires compute capability >= 8.0 (per the
        # `device_has_capability(8, 0)` guard below).
        enable_fused_linear_geglu: bool = gpu_device.device_has_capability(
            8, 0)
        prefer_lowp_gemm: bool = True
        # Opt-in features; callers enable these after probing availability.
        enable_xformers: bool = False
        enable_cuda_graph: bool = False
        enable_triton: bool = False
        trace_scheduler: bool = False
def compile(m, config):
    """Compile a diffusers pipeline `m` in place per `config` and return it.

    NOTE(review): relies on module-level helpers (`compile_unet`,
    `compile_vae`, `_build_lazy_trace`, `make_dynamic_graphed_callable`)
    defined elsewhere in the module — not visible in this excerpt.
    """
    # attribute `device` is not generally available
    device = m.device if hasattr(m, 'device') else torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    # CUDA graphs are only applied when the pipeline actually sits on CUDA.
    enable_cuda_graph = config.enable_cuda_graph and device.type == 'cuda'
    m.unet = compile_unet(m.unet, config)
    if hasattr(m, 'controlnet'):
        m.controlnet = compile_unet(m.controlnet, config)
    m.vae = compile_vae(m.vae, config)
    if config.enable_jit:
        lazy_trace_ = _build_lazy_trace(config)
        # Wrap the encoder forwards so they are traced on first call.
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = lazy_trace_(m.text_encoder.forward)
        # for SDXL
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = lazy_trace_(m.text_encoder_2.forward)
        # for SVD
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = lazy_trace_(m.image_encoder.forward)
        if config.trace_scheduler:
            m.scheduler.scale_model_input = lazy_trace_(
                m.scheduler.scale_model_input)
            m.scheduler.step = lazy_trace_(m.scheduler.step)
    if enable_cuda_graph:
        # Graph-capture the encoder forwards to cut kernel-launch overhead.
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = make_dynamic_graphed_callable(
                m.text_encoder.forward)
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = make_dynamic_graphed_callable(
                m.text_encoder_2.forward)
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = make_dynamic_graphed_callable(
                m.image_encoder.forward)
    if hasattr(m, 'image_processor'):
        # NOTE(review): 'prcessor' matches the imported name exactly — do not
        # "fix" the spelling locally.
        from sfast.libs.diffusers.image_processor import patch_image_prcessor
        patch_image_prcessor(m.image_processor)
    return m
def compile_model(model):
    """Compile `model` with stable-fast, turning on xformers/Triton when they
    are importable, plus CUDA graph capture, and return the compiled model."""
    config = CompilationConfig.Default()
    # xformers and Triton are suggested for best performance but are optional;
    # fall back gracefully when either is missing. Note that Triton can be
    # slow while it generates, compiles and fine-tunes kernels, and may be
    # slow on old architectures or when GPU VRAM is insufficient.
    optional_accelerators = (
        ('xformers', 'xformers', 'enable_xformers'),
        ('triton', 'Triton', 'enable_triton'),
    )
    for module_name, label, flag in optional_accelerators:
        try:
            importlib.import_module(module_name)
        except ImportError:
            print(f'{label} not installed, skip')
        else:
            setattr(config, flag, True)
    # NOTE:
    # CUDA Graph cuts CPU launch overhead for small batches/resolutions, at
    # the cost of extra GPU memory (notably on Windows/WSL with shared VRAM).
    # Disable it if VRAM is tight or resolutions are large.
    config.enable_cuda_graph = True
    return compile(model, config)
# Benchmark defaults for the Stable Video Diffusion (SVD) example.
# Each constant is the default value of the CLI flag with the same name.
MODEL = 'stabilityai/stable-video-diffusion-img2vid-xt'
VARIANT = None
CUSTOM_PIPELINE = None
SCHEDULER = None
LORA = None
CONTROLNET = None
STEPS = 25
SEED = None
WARMUPS = 1
FRAMES = None
BATCH = 1
HEIGHT = 576
WIDTH = 1024
FPS = 7
DECODE_CHUNK_SIZE = 4
INPUT_IMAGE = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png?download=true'
CONTROL_IMAGE = None
OUTPUT_VIDEO = None
EXTRA_CALL_KWARGS = None
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image, export_to_video
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def parse_args():
    """Parse the SVD benchmark's command-line options.

    Defaults come from the module-level constants of the same names.
    """
    option_specs = [
        ('--model', dict(type=str, default=MODEL)),
        ('--variant', dict(type=str, default=VARIANT)),
        ('--custom-pipeline', dict(type=str, default=CUSTOM_PIPELINE)),
        ('--scheduler', dict(type=str, default=SCHEDULER)),
        ('--lora', dict(type=str, default=LORA)),
        ('--controlnet', dict(type=str, default=CONTROLNET)),
        ('--steps', dict(type=int, default=STEPS)),
        ('--seed', dict(type=int, default=SEED)),
        ('--warmups', dict(type=int, default=WARMUPS)),
        ('--frames', dict(type=int, default=FRAMES)),
        ('--batch', dict(type=int, default=BATCH)),
        ('--height', dict(type=int, default=HEIGHT)),
        ('--width', dict(type=int, default=WIDTH)),
        ('--fps', dict(type=int, default=FPS)),
        ('--decode-chunk-size', dict(type=int, default=DECODE_CHUNK_SIZE)),
        ('--extra-call-kwargs', dict(type=str, default=EXTRA_CALL_KWARGS)),
        ('--input-image', dict(type=str, default=INPUT_IMAGE)),
        ('--control-image', dict(type=str, default=CONTROL_IMAGE)),
        ('--output-video', dict(type=str, default=OUTPUT_VIDEO)),
        ('--compiler',
         dict(type=str,
              default='sfast',
              choices=['none', 'sfast', 'compile', 'compile-max-autotune'])),
        ('--quantize', dict(action='store_true')),
        ('--no-fusion', dict(action='store_true')),
    ]
    cli = argparse.ArgumentParser()
    for flag, options in option_specs:
        cli.add_argument(flag, **options)
    return cli.parse_args()
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image, export_to_video
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
def load_model(pipeline_cls,
               model,
               variant=None,
               custom_pipeline=None,
               scheduler=None,
               lora=None,
               controlnet=None):
    """Load an fp16 diffusers pipeline, optionally attaching a ControlNet,
    swapping the scheduler, and fusing LoRA weights, then move it to CUDA
    with the safety checker disabled."""
    load_kwargs = {'torch_dtype': torch.float16}
    if custom_pipeline is not None:
        load_kwargs['custom_pipeline'] = custom_pipeline
    if variant is not None:
        load_kwargs['variant'] = variant
    if controlnet is not None:
        # Deferred import: only needed when a ControlNet id is given.
        from diffusers import ControlNetModel
        load_kwargs['controlnet'] = ControlNetModel.from_pretrained(
            controlnet, torch_dtype=torch.float16)
    pipe = pipeline_cls.from_pretrained(model, **load_kwargs)
    if scheduler is not None:
        diffusers_mod = importlib.import_module('diffusers')
        scheduler_cls = getattr(diffusers_mod, scheduler)
        pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
    if lora is not None:
        pipe.load_lora_weights(lora)
        pipe.fuse_lora()
    pipe.safety_checker = None
    pipe.to(torch.device('cuda'))
    return pipe
import importlib
import inspect
import argparse
import time
import json
import torch
from PIL import (Image, ImageDraw)
from diffusers.utils import load_image, export_to_video
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
class CompilationConfig:
    # NOTE(review): this is an excerpt of sfast's compiler config. `gpu_device`
    # is not defined in this snippet and must be provided by the enclosing
    # module — confirm before reusing this class in isolation.
    class Default:
        '''
        Default compilation config
        memory_format:
            channels_last if tensor core is available, otherwise contiguous_format.
            On GPUs with tensor core, channels_last is faster
        enable_jit:
            Whether to enable JIT, most optimizations are done with JIT
        enable_jit_freeze:
            Whether to freeze the model after JIT tracing.
            Freezing the model will enable us to optimize the model further.
        preserve_parameters:
            Whether to preserve parameters when freezing the model.
            If True, parameters will be preserved, but the model will be a bit slower.
            If False, parameters will be marked as constants, and the model will be faster.
            However, if parameters are not preserved, LoRA cannot be switched dynamically.
        enable_cnn_optimization:
            Whether to enable CNN optimization by fusion.
        enable_fused_linear_geglu:
            Whether to enable fused Linear-GEGLU kernel.
            It uses fp16 for accumulation, so could cause **quality degradation**.
        prefer_lowp_gemm:
            Whether to prefer low-precision GEMM and a series of fusion optimizations.
            This will make the model faster, but may cause numerical issues.
            These use fp16 for accumulation, so could cause **quality degradation**.
        enable_xformers:
            Whether to enable xformers and hijack it to make it compatible with JIT tracing.
        enable_cuda_graph:
            Whether to enable CUDA graph. CUDA Graph will significantly speed up the model,
            by reducing the overhead of CUDA kernel launch, memory allocation, etc.
            However, it will also increase the memory usage.
            Our implementation of CUDA graph supports dynamic shape by caching graphs of
            different shapes.
        enable_triton:
            Whether to enable Triton generated CUDA kernels.
            Triton generated CUDA kernels are faster than PyTorch's CUDA kernels.
            However, Triton has a lot of bugs, and can increase the CPU overhead,
            though the overhead can be reduced by enabling CUDA graph.
        trace_scheduler:
            Whether to trace the scheduler.
        '''
        # Defaults are evaluated at class-creation time via `gpu_device`
        # hardware queries (tensor cores, compute capability).
        memory_format: torch.memory_format = (
            torch.channels_last if gpu_device.device_has_tensor_core() else
            torch.contiguous_format)
        enable_jit: bool = True
        enable_jit_freeze: bool = True
        preserve_parameters: bool = True
        enable_cnn_optimization: bool = gpu_device.device_has_tensor_core()
        # Fused Linear-GEGLU requires compute capability >= 8.0 (per the
        # `device_has_capability(8, 0)` guard below).
        enable_fused_linear_geglu: bool = gpu_device.device_has_capability(
            8, 0)
        prefer_lowp_gemm: bool = True
        # Opt-in features; callers enable these after probing availability.
        enable_xformers: bool = False
        enable_cuda_graph: bool = False
        enable_triton: bool = False
        trace_scheduler: bool = False
def compile(m, config):
    """Compile a diffusers pipeline `m` in place per `config` and return it.

    NOTE(review): relies on module-level helpers (`compile_unet`,
    `compile_vae`, `_build_lazy_trace`, `make_dynamic_graphed_callable`)
    defined elsewhere in the module — not visible in this excerpt.
    """
    # attribute `device` is not generally available
    device = m.device if hasattr(m, 'device') else torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    # CUDA graphs are only applied when the pipeline actually sits on CUDA.
    enable_cuda_graph = config.enable_cuda_graph and device.type == 'cuda'
    m.unet = compile_unet(m.unet, config)
    if hasattr(m, 'controlnet'):
        m.controlnet = compile_unet(m.controlnet, config)
    m.vae = compile_vae(m.vae, config)
    if config.enable_jit:
        lazy_trace_ = _build_lazy_trace(config)
        # Wrap the encoder forwards so they are traced on first call.
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = lazy_trace_(m.text_encoder.forward)
        # for SDXL
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = lazy_trace_(m.text_encoder_2.forward)
        # for SVD
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = lazy_trace_(m.image_encoder.forward)
        if config.trace_scheduler:
            m.scheduler.scale_model_input = lazy_trace_(
                m.scheduler.scale_model_input)
            m.scheduler.step = lazy_trace_(m.scheduler.step)
    if enable_cuda_graph:
        # Graph-capture the encoder forwards to cut kernel-launch overhead.
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = make_dynamic_graphed_callable(
                m.text_encoder.forward)
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = make_dynamic_graphed_callable(
                m.text_encoder_2.forward)
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = make_dynamic_graphed_callable(
                m.image_encoder.forward)
    if hasattr(m, 'image_processor'):
        # NOTE(review): 'prcessor' matches the imported name exactly — do not
        # "fix" the spelling locally.
        from sfast.libs.diffusers.image_processor import patch_image_prcessor
        patch_image_prcessor(m.image_processor)
    return m
def compile_model(model):
    """Compile `model` with stable-fast, enabling xformers/Triton when they
    are importable. CUDA graph capture stays off for this pipeline."""
    config = CompilationConfig.Default()
    # xformers and Triton are suggested for best performance but are optional;
    # fall back gracefully when either is missing. Triton may be slow while
    # generating/compiling kernels, on old GPUs, or with insufficient VRAM.
    for module_name, label, flag in (('xformers', 'xformers',
                                      'enable_xformers'),
                                     ('triton', 'Triton', 'enable_triton')):
        try:
            importlib.import_module(module_name)
        except ImportError:
            print(f'{label} not installed, skip')
        else:
            setattr(config, flag, True)
    # NOTE:
    # CUDA Graph lowers CPU launch overhead for small batches/resolutions but
    # increases GPU memory use; it is not needed for
    # StableVideoDiffusionPipeline, so it stays disabled here.
    # config.enable_cuda_graph = True
    return compile(model, config)
import torch
from diffusers import AutoPipelineForText2Image, EulerDiscreteScheduler, ControlNetModel
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
import numpy as np
import cv2
from PIL import Image
def canny_process(image, width, height):
    """Resize `image` and return its Canny edge map as a 3-channel PIL image.

    Args:
        image: Source image as a numpy/OpenCV array.
        width: Target width in pixels.
        height: Target height in pixels.

    Returns:
        PIL.Image: The edge map replicated across 3 channels (the shape a
        ControlNet conditioning input is fed in this script).
    """
    resized = cv2.resize(image, (width, height))
    edges = cv2.Canny(resized, 100, 200)  # fixed low/high thresholds
    # Replicate the single-channel edge map into 3 identical channels.
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    return Image.fromarray(edges)
import torch
from diffusers import AutoPipelineForText2Image, EulerDiscreteScheduler, ControlNetModel
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
import numpy as np
import cv2
from PIL import Image
def reference_process(image, width, height):
    """Resize *image* to (width, height) and return it as a PIL image."""
    resized = cv2.resize(image, (width, height))
    return Image.fromarray(resized)
17,234 | import torch
from diffusers import AutoPipelineForText2Image, EulerDiscreteScheduler, ControlNetModel
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
import numpy as np
import cv2
from PIL import Image
CUDA_DEVICE = "cuda:0"
def load_model():
    """Build the SD 1.5 text2img pipeline with canny ControlNet + IP-Adapter.

    Loads fp16 weights, swaps in an Euler discrete scheduler, disables the
    safety checker, attaches the SD1.5 IP-Adapter, and moves everything to
    the configured CUDA device.
    """
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_canny",
        torch_dtype=torch.float16,
        variant="fp16",
        name="diffusion_pytorch_model.fp16.safetensors",
        use_safetensors=True)
    pipeline = AutoPipelineForText2Image.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float16,
        controlnet=controlnet)
    pipeline.scheduler = EulerDiscreteScheduler.from_config(
        pipeline.scheduler.config)
    pipeline.safety_checker = None  # NSFW filter intentionally disabled
    pipeline.load_ip_adapter("h94/IP-Adapter",
                             subfolder="models",
                             weight_name="ip-adapter_sd15.safetensors")
    pipeline.to(torch.device(CUDA_DEVICE))
    return pipeline
17,235 | import torch
from diffusers import AutoPipelineForText2Image, EulerDiscreteScheduler, ControlNetModel
from diffusers.utils import load_image
from sfast.compilers.diffusion_pipeline_compiler import (compile,
CompilationConfig)
import numpy as np
import cv2
from PIL import Image
class CompilationConfig:
    """Namespace grouping stable-fast compilation presets."""

    class Default:
        '''
        Default compilation config

        memory_format:
            channels_last if tensor core is available, otherwise contiguous_format.
            On GPUs with tensor core, channels_last is faster
        enable_jit:
            Whether to enable JIT, most optimizations are done with JIT
        enable_jit_freeze:
            Whether to freeze the model after JIT tracing.
            Freezing the model will enable us to optimize the model further.
        preserve_parameters:
            Whether to preserve parameters when freezing the model.
            If True, parameters will be preserved, but the model will be a bit slower.
            If False, parameters will be marked as constants, and the model will be faster.
            However, if parameters are not preserved, LoRA cannot be switched dynamically.
        enable_cnn_optimization:
            Whether to enable CNN optimization by fusion.
        enable_fused_linear_geglu:
            Whether to enable fused Linear-GEGLU kernel.
            It uses fp16 for accumulation, so could cause **quality degradation**.
        prefer_lowp_gemm:
            Whether to prefer low-precision GEMM and a series of fusion optimizations.
            This will make the model faster, but may cause numerical issues.
            These use fp16 for accumulation, so could cause **quality degradation**.
        enable_xformers:
            Whether to enable xformers and hijack it to make it compatible with JIT tracing.
        enable_cuda_graph:
            Whether to enable CUDA graph. CUDA Graph will significantly speed up the model,
            by reducing the overhead of CUDA kernel launch, memory allocation, etc.
            However, it will also increase the memory usage.
            Our implementation of CUDA graph supports dynamic shape by caching graphs of
            different shapes.
        enable_triton:
            Whether to enable Triton generated CUDA kernels.
            Triton generated CUDA kernels are faster than PyTorch's CUDA kernels.
            However, Triton has a lot of bugs, and can increase the CPU overhead,
            though the overhead can be reduced by enabling CUDA graph.
        trace_scheduler:
            Whether to trace the scheduler.
        '''
        # NOTE(review): `gpu_device` is a module-level helper not visible in
        # this chunk; presumably it probes the CUDA device capability — confirm.
        memory_format: torch.memory_format = (
            torch.channels_last if gpu_device.device_has_tensor_core() else
            torch.contiguous_format)
        enable_jit: bool = True
        enable_jit_freeze: bool = True
        preserve_parameters: bool = True
        enable_cnn_optimization: bool = gpu_device.device_has_tensor_core()
        # Fused Linear-GEGLU requires compute capability >= 8.0 (Ampere+).
        enable_fused_linear_geglu: bool = gpu_device.device_has_capability(
            8, 0)
        prefer_lowp_gemm: bool = True
        enable_xformers: bool = False
        enable_cuda_graph: bool = False
        enable_triton: bool = False
        trace_scheduler: bool = False
def compile(m, config):
    """Apply stable-fast optimizations to a diffusers pipeline in place.

    Compiles the UNet (and ControlNet, if present) and the VAE, optionally
    wraps the text/image encoders and the scheduler with lazy JIT tracing,
    and finally wraps the encoders with dynamic CUDA graphs when requested.
    Returns the (mutated) pipeline object *m*.
    """
    # attribute `device` is not generally available
    device = m.device if hasattr(m, 'device') else torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    # CUDA graphs are only meaningful when actually running on a CUDA device.
    enable_cuda_graph = config.enable_cuda_graph and device.type == 'cuda'
    m.unet = compile_unet(m.unet, config)
    if hasattr(m, 'controlnet'):
        # ControlNet shares the UNet architecture, so the same compiler applies.
        m.controlnet = compile_unet(m.controlnet, config)
    m.vae = compile_vae(m.vae, config)
    if config.enable_jit:
        lazy_trace_ = _build_lazy_trace(config)
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = lazy_trace_(m.text_encoder.forward)
        # for SDXL
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = lazy_trace_(m.text_encoder_2.forward)
        # for SVD
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = lazy_trace_(m.image_encoder.forward)
        if config.trace_scheduler:
            m.scheduler.scale_model_input = lazy_trace_(
                m.scheduler.scale_model_input)
            m.scheduler.step = lazy_trace_(m.scheduler.step)
    if enable_cuda_graph:
        if getattr(m, 'text_encoder', None) is not None:
            m.text_encoder.forward = make_dynamic_graphed_callable(
                m.text_encoder.forward)
        if getattr(m, 'text_encoder_2', None) is not None:
            m.text_encoder_2.forward = make_dynamic_graphed_callable(
                m.text_encoder_2.forward)
        if getattr(m, 'image_encoder', None) is not None:
            m.image_encoder.forward = make_dynamic_graphed_callable(
                m.image_encoder.forward)
    if hasattr(m, 'image_processor'):
        # "prcessor" (sic) — the upstream sfast helper really is spelled this way.
        from sfast.libs.diffusers.image_processor import patch_image_prcessor
        patch_image_prcessor(m.image_processor)
    return m
def compile_model(model):
    """Compile a diffusers pipeline with stable-fast.

    xformers and Triton are enabled only when the corresponding package
    imports cleanly. CUDA Graph is enabled unconditionally here: it cuts
    CPU kernel-launch overhead for small batches/resolutions at the cost
    of additional GPU memory.
    """
    config = CompilationConfig.Default()
    # xformers and Triton are suggested for best performance; fall back
    # gracefully when either is not installed.
    try:
        import xformers  # noqa: F401 — imported only to probe availability
        config.enable_xformers = True
    except ImportError:
        print('xformers not installed, skip')
    try:
        import triton  # noqa: F401 — imported only to probe availability
        config.enable_triton = True
    except ImportError:
        print('Triton not installed, skip')
    # NOTE: disable this if GPU memory is tight — CUDA graphs cache per shape.
    config.enable_cuda_graph = True
    return compile(model, config)
17,236 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_onprem(f):
    """Normalize an onprem icon file name: underscores to hyphens, lowercased."""
    return f.replace("_", "-").lower()
17,237 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_aws(f):
    """Normalize an AWS icon file name and strip the known provider prefix."""
    # Order matters: "-light-bg4x" must be stripped before "-light-bg".
    substitutions = (
        ("_", "-"),
        ("@4x", ""),
        ("@5x", ""),
        ("2.0", "2-0"),
        ("-light-bg4x", ""),
        ("-light-bg", ""),
    )
    for old, new in substitutions:
        f = f.replace(old, new)
    # Drop the first configured prefix that matches, if any.
    for prefix in cfg.FILE_PREFIXES["aws"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,238 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_azure(f):
    """Normalize an Azure icon file name and strip the known provider prefix."""
    stripped = f.replace("_", "-").replace("(", "").replace(")", "")
    # Collapse any whitespace runs into single hyphens.
    f = "-".join(stripped.split())
    for prefix in cfg.FILE_PREFIXES["azure"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,239 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_gcp(f):
    """Normalize a GCP icon file name and strip the known provider prefix."""
    # Underscores become hyphens, then whitespace runs collapse to hyphens.
    f = "-".join(f.replace("_", "-").split())
    for prefix in cfg.FILE_PREFIXES["gcp"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,240 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_ibm(f):
    """Normalize an IBM icon file name and strip the known provider prefix."""
    f = "-".join(f.replace("_", "-").split())
    for prefix in cfg.FILE_PREFIXES["ibm"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,241 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_firebase(f):
    """Normalize a Firebase icon file name and strip the known provider prefix."""
    f = "-".join(f.replace("_", "-").split())
    for prefix in cfg.FILE_PREFIXES["firebase"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,242 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_k8s(f):
    """Normalize a Kubernetes icon file name: drop the '-256' size suffix and prefix."""
    f = f.replace("-256", "")
    for prefix in cfg.FILE_PREFIXES["k8s"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,243 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_digitalocean(f):
    """Normalize a DigitalOcean icon file name: drop the '-32' size suffix and prefix."""
    f = f.replace("-32", "")
    for prefix in cfg.FILE_PREFIXES["digitalocean"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,244 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_alibabacloud(f):
    """Normalize an Alibaba Cloud icon file name: strip the known provider prefix."""
    for prefix in cfg.FILE_PREFIXES["alibabacloud"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,245 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_oci(f):
    """Normalize an OCI icon file name: spaces/underscores to hyphens, strip prefix."""
    f = f.replace(" ", "-").replace("_", "-")
    for prefix in cfg.FILE_PREFIXES["oci"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
17,246 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_programming(f):
    """Programming icons need no renaming beyond lowercasing."""
    return f.lower()
17,247 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_generic(f):
    """Generic icons need no renaming beyond lowercasing."""
    return f.lower()
17,248 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_saas(f):
    """SaaS icons need no renaming beyond lowercasing."""
    return f.lower()
17,249 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_elastic(f):
    """Elastic icons need no renaming beyond lowercasing."""
    return f.lower()
17,250 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_outscale(f):
    """Outscale icons need no renaming beyond lowercasing."""
    return f.lower()
17,251 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
def cleaner_openstack(f):
    """OpenStack icons need no renaming beyond lowercasing."""
    return f.lower()
17,252 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
# Dispatch table mapping each provider key to the file-name normalizer that
# clean_png() applies to that provider's icon resources.
cleaners = {
    "onprem": cleaner_onprem,
    "aws": cleaner_aws,
    "azure": cleaner_azure,
    "digitalocean": cleaner_digitalocean,
    "gcp": cleaner_gcp,
    "ibm": cleaner_ibm,
    "firebase": cleaner_firebase,
    "k8s": cleaner_k8s,
    "alibabacloud": cleaner_alibabacloud,
    "oci": cleaner_oci,
    "programming": cleaner_programming,
    "saas": cleaner_saas,
    "elastic": cleaner_elastic,
    "outscale": cleaner_outscale,
    "generic": cleaner_generic,
    "openstack": cleaner_openstack,
}
The provided code snippet includes necessary dependencies for implementing the `clean_png` function. Write a Python function `def clean_png(pvd: str) -> None` to solve the following problem:
Refine the resource file names.
Here is the function:
def clean_png(pvd: str) -> None:
    """Refine the resource file names for provider *pvd*.

    Walks the provider's resource directory and renames every ``.png`` file
    with the provider-specific cleaner from the ``cleaners`` dispatch table.
    """
    cleaner = cleaners[pvd]  # hoisted: one dict lookup for the whole walk
    for root, _, files in os.walk(resource_dir(pvd)):
        for png in files:
            if not png.endswith(".png"):
                continue
            # Plain loop instead of a side-effecting list comprehension.
            os.rename(os.path.join(root, png),
                      os.path.join(root, cleaner(png)))
17,253 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
The provided code snippet includes necessary dependencies for implementing the `round_png` function. Write a Python function `def round_png(pvd: str) -> None` to solve the following problem:
Round the images.
Here is the function:
def round_png(pvd: str) -> None:
    """Round the corners of the provider's PNG images.

    Runs the external rounding command on every ``.png`` whose name does not
    already contain "rounded" (those are the command's own outputs).
    """
    for root, _, files in os.walk(resource_dir(pvd)):
        for name in files:
            # Plain loop instead of a side-effecting list comprehension.
            if name.endswith(".png") and "rounded" not in name:
                subprocess.run(
                    [cfg.CMD_ROUND, *cfg.CMD_ROUND_OPTS,
                     os.path.join(root, name)])
17,254 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
The provided code snippet includes necessary dependencies for implementing the `svg2png` function. Write a Python function `def svg2png(pvd: str) -> None` to solve the following problem:
Convert the SVG files into PNG.
Here is the function:
def svg2png(pvd: str) -> None:
    """Convert every SVG under the provider's resource dir to PNG.

    The converter writes the PNG next to the source; the SVG is removed
    afterwards.
    """
    for root, _, files in os.walk(resource_dir(pvd)):
        for name in files:
            # Plain loop instead of a side-effecting list comprehension.
            if name.endswith(".svg"):
                path = os.path.join(root, name)
                subprocess.run([cfg.CMD_SVG2PNG, *cfg.CMD_SVG2PNG_OPTS, path])
                # NOTE: shelling out to `rm` keeps the original behavior
                # (failures ignored); os.remove would raise on error.
                subprocess.run(["rm", path])
17,255 | import os
import subprocess
import sys
import config as cfg
from . import resource_dir
The provided code snippet includes necessary dependencies for implementing the `svg2png2` function. Write a Python function `def svg2png2(pvd: str) -> None` to solve the following problem:
Convert the SVG files into PNG using ImageMagick.
Here is the function:
def svg2png2(pvd: str) -> None:
    """Convert every SVG under the provider's resource dir to PNG via ImageMagick.

    The destination file is the source path with its ``.svg`` suffix replaced
    by ``.png``; the SVG is removed afterwards.
    """
    for root, _, files in os.walk(resource_dir(pvd)):
        for name in files:
            # Plain loop instead of a side-effecting list comprehension.
            if name.endswith(".svg"):
                path_src = os.path.join(root, name)
                path_dest = path_src.replace(".svg", ".png")
                subprocess.run(
                    [cfg.CMD_SVG2PNG_IM, *cfg.CMD_SVG2PNG_IM_OPTS,
                     path_src, path_dest])
                subprocess.run(["rm", path_src])
17,256 | import os
import sys
from typing import Iterable
from jinja2 import Environment, FileSystemLoader, Template, exceptions
import config as cfg
from . import app_root_dir, doc_root_dir, resource_dir, template_dir, base_dir
def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str:
    """Generate all service node classes based on resource paths with class templates."""
    tmpl = load_tmpl(cfg.TMPL_MODULE)

    # TODO: extract the gen class metas for sharing
    # TODO: independent function for generating all pvd/typ/paths pairs
    def _gen_class_meta(path: str) -> dict:
        # Class name: CamelCase-join the hyphen-separated icon base name.
        base = os.path.splitext(path)[0]
        name = "".join(up_or_title(pvd, s) for s in base.split("-"))
        return {"name": name, "icon": path}

    metas = map(_gen_class_meta, paths)
    # .get(): same result as the old `x if typ in d else {}`, consistent
    # with how gen_apidoc reads cfg.ALIASES.
    aliases = cfg.ALIASES[pvd].get(typ, {})
    return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases)
def gen_apidoc(pvd: str, typ_paths: dict) -> str:
    """Render the API documentation page for one provider.

    Prefers a provider-specific template (``<stem>_<pvd>.<ext>``) and falls
    back to the generic apidoc template when it does not exist.
    """
    try:
        parts = cfg.TMPL_APIDOC.split('.')
        tmpl = load_tmpl(f"{parts[0]}_{pvd}.{parts[1]}")
    except exceptions.TemplateNotFound:
        tmpl = load_tmpl(cfg.TMPL_APIDOC)

    # TODO: remove
    def _gen_class_name(path: str) -> str:
        stem = os.path.splitext(path)[0]
        return "".join(up_or_title(pvd, piece) for piece in stem.split("-"))

    typ_classes = {}
    for typ, (paths, resource_root) in sorted(typ_paths.items()):
        entries = []
        for path in paths:
            name = _gen_class_name(path)
            entries.append({
                "name": name,
                "alias": cfg.ALIASES[pvd].get(typ, {}).get(name),
                "resource_path": os.path.join(resource_root, path),
            })
        typ_classes[typ] = entries
    return tmpl.render(pvd=pvd, typ_classes=typ_classes)
def make_module(pvd: str, typ: str, classes: str) -> None:
    """Write the generated classes for one resource type to its module file."""
    target = os.path.join(app_root_dir(pvd), f"{typ}.py")
    with open(target, "w+") as fh:
        fh.write(classes)
def make_apidoc(pvd: str, content: str) -> None:
    """Write the rendered API documentation page for one provider."""
    target = os.path.join(doc_root_dir(), f"{pvd}.md")
    with open(target, "w+") as fh:
        fh.write(content)
The provided code snippet includes necessary dependencies for implementing the `generate` function. Write a Python function `def generate(pvd: str) -> None` to solve the following problem:
Generate service node classes.
Here is the function:
def generate(pvd: str) -> None:
    """Generate service node classes (and the API doc page) for one provider."""
    typ_paths = {}
    base = base_dir()
    for root, _, files in os.walk(resource_dir(pvd)):
        files.sort()
        # Only original (non-rounded) PNG resources become classes;
        # one comprehension replaces the old filter/lambda chain.
        paths = [f for f in files
                 if f.endswith(".png") and "rounded" not in f]
        # Skip the top-level provider directory itself.
        typ = os.path.basename(root)
        if typ == pvd:
            continue
        make_module(pvd, typ, gen_classes(pvd, typ, paths))
        typ_paths[typ] = (paths, os.path.relpath(root, base))
    # Build API documentation from everything collected above.
    make_apidoc(pvd, gen_apidoc(pvd, typ_paths))
17,257 | from collections import defaultdict
from pathlib import Path
from uuid import uuid4
import sys
import re
from typing import List
import peewee
from datetime import datetime
from sqlite3 import Cursor
database = peewee.SqliteDatabase(None)
lass Database(object):
def __init__(self, sqlite_file: Path) -> None:
def prepare(self):
def reset(self):
# 更新法律版本
# 如果任意法律有多个版本(即同名,但多个 publish, 则将 ver 设为其数量)
# 除最新版本的法律, 其余均设为 expired.
def update_versions(self):
def lookup_path(self) -> Path:
def load_ignore_folders(self):
def __ignore(self, ignore_folders: List[Path], file: Path) -> bool:
def load_laws(self):
def update_law_level(self, laws: List[Law], level: str) -> int:
def validate(self):
def update_database(self):
def get_law_count(self):
# Top-level folders whose name is itself the law level; anything else
# defaults to the generic level "法律".
_SPECIAL_LAW_LEVELS = frozenset(
    {"司法解释", "地方性法规", "宪法", "案例", "行政法规", "部门规章"})


def get_law_level_by_folder(folder: Path) -> str:
    """Derive the law level from the top-level folder of *folder*.

    The old fully-anchored regex alternation was just an exact-match test,
    so a set membership check is equivalent and simpler.
    """
    root_folder = folder.parts[0]
    return root_folder if root_folder in _SPECIAL_LAW_LEVELS else "法律"
17,258 | import re
import json
from typing import List
from uuid import uuid4
# Headings that mark a named section inside a court-case document.
section = [
    "案情回顾",
    "法官解读",
    "基本案情",
    "申请人请求",
    "原告诉讼请求",
    "裁判结果",
    "处理结果",
    "案例分析",
    "典型意义",
    "裁判要点",
    "简要案情",
    "法院裁判",
    "裁判要旨",
    "适用解析",
    "司法解释相关法条",
]

# Pre-compiled matchers (the old code recompiled every pattern on each call):
# a heading, optionally wrapped in 【】 / () / () brackets.
_SECTION_PATTERNS = [
    re.compile(r"^[【\((]{0,1}" + heading + r"[】\))]{0,1}$")
    for heading in section
]


def isSection(line) -> bool:
    """Return True when *line* (stripped) is exactly one of the known
    section headings, optionally wrapped in fullwidth/halfwidth brackets."""
    stripped = line.strip()
    return any(pattern.search(stripped) for pattern in _SECTION_PATTERNS)
17,259 | import re
import json
from typing import List
from uuid import uuid4
nums}、)|(?:^案例{zh_nums}))"
def isTitle(line) -> "str | None":
    """Return *line* with its leading case-number prefix removed when it
    matches ``title_matcher``; otherwise return None.

    NOTE(review): ``title_matcher`` is a module-level regex whose definition
    is truncated in this chunk (it appears to match numbered "案例…" style
    prefixes) — confirm the accepted formats against the full source.
    """
    if re.match(title_matcher, line):
        # Strip the matched numbering prefix, then surrounding whitespace.
        return re.sub(title_matcher, "", line).strip()
    return None
17,260 | import logging
import re
from typing import List, Tuple
from docx.document import Document as _Document
from docx import Document
from docx.oxml.table import CT_Tbl
from docx.oxml.text.paragraph import CT_P
from docx.oxml import CT_SectPr
from docx.table import Table, _Cell, _Row
from docx.text.paragraph import Paragraph
from parsers.base import Parser
from common import LINE_RE
LINE_RE = INDENT_RE + [f"^第{NUMBER_RE}+条"]
def isStartLine(line: str):
    """Return True when *line* begins a new logical block, i.e. it matches
    any pattern in the module-level ``LINE_RE`` list (indent markers or a
    "第…条" article header)."""
    # any() replaces the manual loop-and-return-True idiom.
    return any(re.match(pattern, line) for pattern in LINE_RE)
17,261 | import logging
import os
import re
import sys
from hashlib import md5
from pathlib import Path
from time import time
from typing import Any, List
from common import LINE_RE
from manager import CacheManager, RequestManager
from parsers import ContentParser, HTMLParser, Parser, WordParser
def find(f, arr: List[Any]) -> Any:
    """Return the first element of *arr* satisfying predicate *f*.

    Raises:
        Exception: with the message "not found" when nothing matches.
    """
    hits = (element for element in arr if f(element))
    try:
        return next(hits)
    except StopIteration:
        raise Exception("not found") from None
17,262 | import logging
import os
import re
import sys
from hashlib import md5
from pathlib import Path
from time import time
from typing import Any, List
from common import LINE_RE
from manager import CacheManager, RequestManager
from parsers import ContentParser, HTMLParser, Parser, WordParser
LINE_RE = INDENT_RE + [f"^第{NUMBER_RE}+条"]
def isStartLine(line: str):
    """Return True when *line* begins a new logical block, i.e. it matches
    any pattern in the module-level ``LINE_RE`` list (indent markers or a
    "第…条" article header)."""
    # any() replaces the manual loop-and-return-True idiom.
    return any(re.match(pattern, line) for pattern in LINE_RE)
17,263 | from pathlib import Path
from parsers import WordParser, ContentParser
from manager import CacheManager
word_parser = WordParser()
content_parser = ContentParser()
cache = CacheManager()
def parse(doc_file: Path):
    """Parse one Word document and persist it as a markdown law file.

    Uses the module-level word/content parser singletons; documents the
    content parser rejects (falsy result) are silently skipped.
    """
    print(doc_file)
    title, desc, content = word_parser.parse_document(doc_file, doc_file.stem)
    rendered = content_parser.parse(None, title, desc, content)
    if rendered:
        cache.write_law(f"{title}.md", rendered)
17,264 |
cfg = Config(VERSION_PATH, EXAMPLE_PATH, CONFIG_PATH)
cfg.env = os.environ.copy()
cfg.env['PATH'] = os.path.dirname(cfg.python_exe_path) + ';' + cfg.env['PATH']
cfg.useragent = {"User-Agent": f"March7thAssistant/{cfg.version}"}
if cfg.auto_set_game_path_enable:
detect_game_path()
log = Logger(cfg.get_value('log_level')) ;log.hr("{bCqCXzkKOEEZHJEc2CEg} {p6LqXENrR1AWA9rVW6mH}\n{mPxo756ANmcTn55VL89U}{s8osWU36ynHUQ9BnSsf3}".format(p6LqXENrR1AWA9rVW6mH=base64.b64decode("TWFyY2g3dGggQXNzaXN0YW50").decode("utf-8"),mPxo756ANmcTn55VL89U=base64.b64decode("aHR0cHM6Ly9naXRodWIuY29tL21vZXNub3cvTWFyY2g3dGhBc3Npc3RhbnQ=").decode("utf-8"),bCqCXzkKOEEZHJEc2CEg=base64.b64decode("5qyi6L+O5L2/55So").decode("utf-8"),s8osWU36ynHUQ9BnSsf3=base64.b64decode("CgrmraTnqIvluo/kuLrlhY3otLnlvIDmupDpobnnm64g5aaC5p6c5L2g5LuY5LqG6ZKx6K+356uL5Yi76YCA5qy+").decode("utf-8")),0,False)
def first_run():
    """Abort (after a keypress) when the assistant has never been configured.

    The config key is stored base64-obfuscated; "YXV0b191cGRhdGU=" decodes to
    "auto_update". A missing/falsy value is taken to mean the GUI has never
    been opened to initialize the configuration.
    """
    if not cfg.get_value(base64.b64decode("YXV0b191cGRhdGU=").decode("utf-8")):
        log.error("首次使用请先打开图形界面")
        input("按回车键关闭窗口. . .")
        sys.exit(0)
17,265 |
class Daily:
def start():
if cfg.daily_enable:
Daily.run()
# 优先历战余响
if Date.is_next_mon_x_am(cfg.echo_of_war_timestamp, cfg.refresh_hour):
if cfg.echo_of_war_enable:
Echoofwar.start()
else:
log.info("历战余响未开启")
else:
log.info("历战余响尚未刷新")
Power.run()
if Date.is_next_x_am(cfg.fight_timestamp, cfg.refresh_hour):
if cfg.fight_enable:
Fight.start()
else:
log.info("锄大地未开启")
else:
log.info("锄大地尚未刷新")
if cfg.universe_frequency == "weekly":
if Date.is_next_mon_x_am(cfg.universe_timestamp, cfg.refresh_hour):
if cfg.universe_enable:
Power.run()
reward.start()
Universe.start(get_reward=True)
Power.run()
else:
log.info("模拟宇宙未开启")
else:
log.info("模拟宇宙尚未刷新")
elif cfg.universe_frequency == "daily":
if Date.is_next_x_am(cfg.universe_timestamp, cfg.refresh_hour):
if cfg.universe_enable:
Universe.start(get_reward=True)
else:
log.info("模拟宇宙未开启")
else:
log.info("模拟宇宙尚未刷新")
if Date.is_next_mon_x_am(cfg.forgottenhall_timestamp, cfg.refresh_hour):
if cfg.forgottenhall_enable:
challenge.start("memoryofchaos")
else:
log.info("忘却之庭未开启")
else:
log.info("忘却之庭尚未刷新")
if Date.is_next_mon_x_am(cfg.purefiction_timestamp, cfg.refresh_hour):
if cfg.purefiction_enable:
challenge.start("purefiction")
else:
log.info("虚构叙事未开启")
else:
log.info("虚构叙事尚未刷新")
Power.run()
def run():
log.hr("开始日常任务", 0)
if Date.is_next_x_am(cfg.last_run_timestamp, cfg.refresh_hour):
screen.change_to("guide2")
tasks = Tasks("./assets/config/task_mappings.json")
tasks.start()
cfg.set_value("daily_tasks", tasks.daily_tasks)
cfg.save_timestamp("last_run_timestamp")
else:
log.info("日常任务尚未刷新")
if len(cfg.daily_tasks) > 0:
task_functions = {
"登录游戏": lambda: True,
"拍照1次": lambda: Photo.photograph(),
"使用1次「万能合成机」": lambda: Synthesis.material(),
"合成1次消耗品": lambda: Synthesis.consumables(),
"合成1次材料": lambda: Synthesis.material(),
"使用1件消耗品": lambda: Synthesis.use_consumables(),
"完成1次「拟造花萼(金)」": lambda: Power.customize_run("拟造花萼(金)", cfg.instance_names["拟造花萼(金)"], 10, 1),
"完成1次「拟造花萼(赤)」": lambda: Power.customize_run("拟造花萼(赤)", cfg.instance_names["拟造花萼(赤)"], 10, 1),
"完成1次「凝滞虚影」": lambda: Power.customize_run("凝滞虚影", cfg.instance_names["凝滞虚影"], 30, 1),
"完成1次「侵蚀隧洞」": lambda: Power.customize_run("侵蚀隧洞", cfg.instance_names["侵蚀隧洞"], 40, 1),
"完成1次「历战余响」": lambda: Power.customize_run("历战余响", cfg.instance_names["历战余响"], 30, 1),
"累计施放2次秘技": lambda: HimekoTry.technique(),
"累计击碎3个可破坏物": lambda: HimekoTry.item(),
"完成1次「忘却之庭」": lambda: challenge.start_memory_one(1),
"单场战斗中,触发3种不同属性的弱点击破": lambda: challenge.start_memory_one(1),
"累计触发弱点击破效果5次": lambda: challenge.start_memory_one(1),
"累计消灭20个敌人": lambda: challenge.start_memory_one(2),
"利用弱点进入战斗并获胜3次": lambda: challenge.start_memory_one(3),
"施放终结技造成制胜一击1次": lambda: challenge.start_memory_one(1),
"通关「模拟宇宙」(任意世界)的1个区域": lambda: Universe.run_daily(),
"完成1次「模拟宇宙」": lambda: Universe.run_daily(),
}
log.hr(f"今日实训", 2)
count = 0
for key, value in cfg.daily_tasks.items():
state = red("待完成") if value else green("已完成")
log.info(f"{key}: {state}")
count = count + 1 if not value else count
log.info(f"已完成:{yellow(f'{count}/{len(cfg.daily_tasks)}')}")
for task_name, task_function in task_functions.items():
if task_name in cfg.daily_tasks and cfg.daily_tasks[task_name]:
if task_function():
cfg.daily_tasks[task_name] = False
cfg.save_config()
log.hr("完成", 2)
def run_main_actions():
    """Main automation loop: run the full task sequence forever.

    Each of these is a module-level task singleton; their internals are not
    visible in this chunk, so the per-step notes below are inferences from
    the names — verify against their modules.
    """
    while True:
        version.start()    # presumably: self-update check
        game.start()       # presumably: launch/attach the game client
        activity.start()   # presumably: time-limited event tasks
        Daily.start()      # daily/weekly routines (see class Daily)
        reward.start()     # presumably: claim accumulated rewards
        game.stop(True)    # close the game before looping again
17,266 |
class Daily:
def start():
if cfg.daily_enable:
Daily.run()
# 优先历战余响
if Date.is_next_mon_x_am(cfg.echo_of_war_timestamp, cfg.refresh_hour):
if cfg.echo_of_war_enable:
Echoofwar.start()
else:
log.info("历战余响未开启")
else:
log.info("历战余响尚未刷新")
Power.run()
if Date.is_next_x_am(cfg.fight_timestamp, cfg.refresh_hour):
if cfg.fight_enable:
Fight.start()
else:
log.info("锄大地未开启")
else:
log.info("锄大地尚未刷新")
if cfg.universe_frequency == "weekly":
if Date.is_next_mon_x_am(cfg.universe_timestamp, cfg.refresh_hour):
if cfg.universe_enable:
Power.run()
reward.start()
Universe.start(get_reward=True)
Power.run()
else:
log.info("模拟宇宙未开启")
else:
log.info("模拟宇宙尚未刷新")
elif cfg.universe_frequency == "daily":
if Date.is_next_x_am(cfg.universe_timestamp, cfg.refresh_hour):
if cfg.universe_enable:
Universe.start(get_reward=True)
else:
log.info("模拟宇宙未开启")
else:
log.info("模拟宇宙尚未刷新")
if Date.is_next_mon_x_am(cfg.forgottenhall_timestamp, cfg.refresh_hour):
if cfg.forgottenhall_enable:
challenge.start("memoryofchaos")
else:
log.info("忘却之庭未开启")
else:
log.info("忘却之庭尚未刷新")
if Date.is_next_mon_x_am(cfg.purefiction_timestamp, cfg.refresh_hour):
if cfg.purefiction_enable:
challenge.start("purefiction")
else:
log.info("虚构叙事未开启")
else:
log.info("虚构叙事尚未刷新")
Power.run()
def run():
log.hr("开始日常任务", 0)
if Date.is_next_x_am(cfg.last_run_timestamp, cfg.refresh_hour):
screen.change_to("guide2")
tasks = Tasks("./assets/config/task_mappings.json")
tasks.start()
cfg.set_value("daily_tasks", tasks.daily_tasks)
cfg.save_timestamp("last_run_timestamp")
else:
log.info("日常任务尚未刷新")
if len(cfg.daily_tasks) > 0:
task_functions = {
"登录游戏": lambda: True,
"拍照1次": lambda: Photo.photograph(),
"使用1次「万能合成机」": lambda: Synthesis.material(),
"合成1次消耗品": lambda: Synthesis.consumables(),
"合成1次材料": lambda: Synthesis.material(),
"使用1件消耗品": lambda: Synthesis.use_consumables(),
"完成1次「拟造花萼(金)」": lambda: Power.customize_run("拟造花萼(金)", cfg.instance_names["拟造花萼(金)"], 10, 1),
"完成1次「拟造花萼(赤)」": lambda: Power.customize_run("拟造花萼(赤)", cfg.instance_names["拟造花萼(赤)"], 10, 1),
"完成1次「凝滞虚影」": lambda: Power.customize_run("凝滞虚影", cfg.instance_names["凝滞虚影"], 30, 1),
"完成1次「侵蚀隧洞」": lambda: Power.customize_run("侵蚀隧洞", cfg.instance_names["侵蚀隧洞"], 40, 1),
"完成1次「历战余响」": lambda: Power.customize_run("历战余响", cfg.instance_names["历战余响"], 30, 1),
"累计施放2次秘技": lambda: HimekoTry.technique(),
"累计击碎3个可破坏物": lambda: HimekoTry.item(),
"完成1次「忘却之庭」": lambda: challenge.start_memory_one(1),
"单场战斗中,触发3种不同属性的弱点击破": lambda: challenge.start_memory_one(1),
"累计触发弱点击破效果5次": lambda: challenge.start_memory_one(1),
"累计消灭20个敌人": lambda: challenge.start_memory_one(2),
"利用弱点进入战斗并获胜3次": lambda: challenge.start_memory_one(3),
"施放终结技造成制胜一击1次": lambda: challenge.start_memory_one(1),
"通关「模拟宇宙」(任意世界)的1个区域": lambda: Universe.run_daily(),
"完成1次「模拟宇宙」": lambda: Universe.run_daily(),
}
log.hr(f"今日实训", 2)
count = 0
for key, value in cfg.daily_tasks.items():
state = red("待完成") if value else green("已完成")
log.info(f"{key}: {state}")
count = count + 1 if not value else count
log.info(f"已完成:{yellow(f'{count}/{len(cfg.daily_tasks)}')}")
for task_name, task_function in task_functions.items():
if task_name in cfg.daily_tasks and cfg.daily_tasks[task_name]:
if task_function():
cfg.daily_tasks[task_name] = False
cfg.save_config()
log.hr("完成", 2)
class Fight:
def update():
from module.update.update_handler import UpdateHandler
from tasks.base.fastest_mirror import FastestMirror
if cfg.fight_operation_mode == "exe":
import requests
import json
response = requests.get(FastestMirror.get_github_api_mirror("linruowuyin", "Fhoe-Rail"), timeout=10, headers=cfg.useragent)
if response.status_code == 200:
data = json.loads(response.text)
url = None
for asset in data["assets"]:
url = FastestMirror.get_github_mirror(asset["browser_download_url"])
break
if url is None:
log.error("没有找到可用更新,请稍后再试")
input("按回车键关闭窗口. . .")
sys.exit(0)
update_handler = UpdateHandler(url, cfg.fight_path, "Fhoe-Rail", os.path.join(cfg.fight_path, "map"))
update_handler.run()
elif cfg.fight_operation_mode == "source":
cfg.set_value("fight_requirements", False)
url = FastestMirror.get_github_mirror(
"https://github.com/linruowuyin/Fhoe-Rail/archive/master.zip")
update_handler = UpdateHandler(url, cfg.fight_path, "Fhoe-Rail-master")
update_handler.run()
def check_path():
status = False
if cfg.fight_operation_mode == "exe":
if not os.path.exists(os.path.join(cfg.fight_path, "Fhoe-Rail.exe")):
status = True
elif cfg.fight_operation_mode == "source":
if not os.path.exists(os.path.join(cfg.fight_path, "Honkai_Star_Rail.py")):
status = True
if not os.path.exists(os.path.join(cfg.fight_path, "点这里啦.exe")):
status = True
if status:
log.warning(f"锄大地路径不存在: {cfg.fight_path}")
Fight.update()
def check_requirements():
if not cfg.fight_requirements:
log.info("开始安装依赖")
from tasks.base.fastest_mirror import FastestMirror
subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i",
FastestMirror.get_pypi_mirror(), "pip", "--upgrade"])
while not subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "-r", "requirements.txt"], check=True, cwd=cfg.fight_path):
log.error("依赖安装失败")
input("按回车键重试. . .")
log.info("依赖安装成功")
cfg.set_value("fight_requirements", True)
def before_start():
Fight.check_path()
if cfg.fight_operation_mode == "source":
PythonChecker.run()
Fight.check_requirements()
return True
def start():
log.hr("准备锄大地", 0)
game = StarRailController(cfg.game_path, cfg.game_process_name, cfg.game_title_name, 'UnityWndClass', log)
game.check_resolution(1920, 1080)
if Fight.before_start():
# 切换队伍
if cfg.fight_team_enable:
Team.change_to(cfg.fight_team_number)
log.info("开始锄大地")
screen.change_to('main')
status = False
if cfg.fight_operation_mode == "exe":
if subprocess_with_timeout([os.path.join(cfg.fight_path, "Fhoe-Rail.exe")], cfg.fight_timeout * 3600, cfg.fight_path):
status = True
elif cfg.fight_operation_mode == "source":
if subprocess_with_timeout([cfg.python_exe_path, "Honkai_Star_Rail.py"], cfg.fight_timeout * 3600, cfg.fight_path, cfg.env):
status = True
if status:
cfg.save_timestamp("fight_timestamp")
Base.send_notification_with_screenshot("🎉锄大地已完成🎉")
return True
log.error("锄大地失败")
Base.send_notification_with_screenshot("⚠️锄大地未完成⚠️")
return False
def gui():
if Fight.before_start():
if cfg.fight_operation_mode == "exe":
if subprocess.run(["start", "Fhoe-Rail.exe", "--debug"], shell=True, check=True, cwd=cfg.fight_path):
return True
elif cfg.fight_operation_mode == "source":
if subprocess.run(["start", "点这里啦.exe"], shell=True, check=True, cwd=cfg.fight_path, env=cfg.env):
return True
return False
def reset_config():
config_path = os.path.join(cfg.fight_path, "config.json")
try:
os.remove(config_path)
log.info(f"重置配置文件完成:{config_path}")
except Exception as e:
log.warning(f"重置配置文件失败:{e}")
class Power:
    """Trailblaze-power (stamina) spending automation: optionally merges
    immersifiers, reads the current power via OCR, and farms the configured
    instance type until power runs out.

    NOTE(review): methods are written without ``self``/``@staticmethod`` and
    are always invoked as ``Power.<method>()`` (namespace-style class).
    """

    def run():
        """Spend all available trailblaze power on the configured instance.

        Returns False when the configured instance is invalid; otherwise
        returns None after the farming loop completes.
        """
        Power.preprocess()
        instance_type = cfg.instance_type
        instance_name = cfg.instance_names[cfg.instance_type]
        if not Instance.validate_instance(instance_type, instance_name):
            return False
        log.hr("开始清体力", 0)
        power = Power.get()
        # Calyx stages take a variable power amount per run; the other stage
        # types have a fixed per-run cost, so two strategies are used.
        if "拟造花萼" in instance_type:
            Power.process_calyx(instance_type, instance_name, power)
        else:
            Power.process_standard(instance_type, instance_name, power)
        log.hr("完成", 2)

    def preprocess():
        """Optional pre-step executed before spending power."""
        # Merge immersifiers first so power is not wasted on regular runs.
        if cfg.merge_immersifier:
            Power.merge("immersifier")

    def process_calyx(instance_type, instance_name, power):
        """Spend *power* on a Calyx: full 60-power runs first, then one
        partial run if at least 10 power remains."""
        instance_power_max = 60  # max power one Calyx run can consume
        instance_power_min = 10  # min power a partial run requires
        full_runs = power // instance_power_max
        if full_runs:
            Instance.run(instance_type, instance_name, instance_power_max, full_runs)
        partial_run_power = power % instance_power_max
        if partial_run_power >= instance_power_min:
            Instance.run(instance_type, instance_name, partial_run_power, 1)
        elif full_runs == 0:
            log.info(f"🟣开拓力 < {instance_power_max}")

    def process_standard(instance_type, instance_name, power):
        """Spend *power* on a fixed-cost stage type (as many full runs as fit)."""
        # Per-run power cost by stage type.
        instance_powers = {
            "凝滞虚影": 30,
            "侵蚀隧洞": 40,
            "历战余响": 30
        }
        instance_power = instance_powers[instance_type]
        full_runs = power // instance_power
        if full_runs:
            Instance.run(instance_type, instance_name, instance_power, full_runs)
        else:
            log.info(f"🟣开拓力 < {instance_power}")

    def customize_run(instance_type, instance_name, power_need, runs):
        """Run a caller-specified instance *runs* times at *power_need* each.

        Returns False when validation fails or power is insufficient;
        otherwise forwards Instance.run's result.
        """
        if not Instance.validate_instance(instance_type, instance_name):
            return False
        log.hr(f"准备{instance_type}", 2)
        power = Power.get()
        if power < power_need * runs:
            log.info(f"🟣开拓力 < {power_need}*{runs}")
            return False
        else:
            return Instance.run(instance_type, instance_name, power_need, runs)

    def get():
        """Read current trailblaze power via OCR, optionally replenishing it
        first from reserved power or fuel. Returns the power as int (-1 on
        OCR failure)."""
        def get_power(crop, type="trailblaze_power"):
            # OCR a "<current>/240"-style readout inside *crop*.
            # NOTE: parameter name `type` shadows the builtin (kept for
            # byte-compatibility).
            try:
                if type == "trailblaze_power":
                    result = auto.get_single_line_text(
                        crop=crop, blacklist=['+', '米'], max_retries=3)
                    # "1240" is a common OCR misread of "/240"; "?" is noise.
                    power = int(result.replace("1240", "/240").replace("?", "").split('/')[0])
                    return power if 0 <= power <= 999 else -1
                elif type == "reserved_trailblaze_power":
                    result = auto.get_single_line_text(
                        crop=crop, blacklist=['+', '米'], max_retries=3)
                    # NOTE(review): int(result[0]) parses only the FIRST
                    # character, yet the range check allows up to 2400 --
                    # looks suspect; confirm against the actual UI text.
                    power = int(result[0])
                    return power if 0 <= power <= 2400 else -1
            except Exception as e:
                log.error(f"识别开拓力失败: {e}")
                return -1

        def move_button_and_confirm():
            # Drag the amount slider to maximum and confirm the replenish
            # dialog; returns True once back on the map screen.
            if auto.click_element("./assets/images/zh_CN/base/confirm.png", "image", 0.9, max_retries=10):
                result = auto.find_element(
                    "./assets/images/share/power/trailblaze_power/button.png", "image", 0.9, max_retries=10)
                if result:
                    auto.click_element_with_pos(result, action="down")
                    time.sleep(0.5)
                    result = auto.find_element(
                        "./assets/images/share/power/trailblaze_power/plus.png", "image", 0.9)
                    if result:
                        # Drag the slider handle to the "+" end, then release.
                        auto.click_element_with_pos(result, action="move")
                        time.sleep(0.5)
                        auto.mouse_up()
                        if auto.click_element("./assets/images/zh_CN/base/confirm.png", "image", 0.9, max_retries=10):
                            time.sleep(1)
                            auto.press_key("esc")
                            if screen.check_screen("map"):
                                return True
            return False

        # Screen-relative crop of the power readout in the top-right corner.
        trailblaze_power_crop = (1588.0 / 1920, 35.0 / 1080, 198.0 / 1920, 56.0 / 1080)
        if cfg.use_reserved_trailblaze_power or cfg.use_fuel:
            screen.change_to('map')
            # Open the power-replenish dialog.
            if auto.click_element("./assets/images/share/power/trailblaze_power/trailblaze_power.png", "image", 0.9, crop=trailblaze_power_crop):
                # Wait for the dialog to finish loading.
                if auto.find_element("./assets/images/zh_CN/base/confirm.png", "image", 0.9, max_retries=10):
                    # Replenish from reserved trailblaze power.
                    if cfg.use_reserved_trailblaze_power and auto.click_element("./assets/images/share/power/trailblaze_power/reserved_trailblaze_power.png", "image", 0.9, scale_range=(0.95, 0.95)):
                        move_button_and_confirm()
                    # Replenish from fuel.
                    elif cfg.use_fuel and auto.click_element("./assets/images/share/power/trailblaze_power/fuel.png", "image", 0.9, scale_range=(0.95, 0.95)):
                        move_button_and_confirm()
                    # # Replenish from stellar jade (disabled).
                    # elif config.stellar_jade and auto.click_element("./assets/images/share/power/trailblaze_power/stellar_jade.png", "image", 0.9, scale_range=(0.95, 0.95)):
                    #     pass
                    else:
                        auto.press_key("esc")
                        screen.change_to('map')
        trailblaze_power = get_power(trailblaze_power_crop)
        log.info(f"🟣开拓力: {trailblaze_power}/240")
        return trailblaze_power

    def merge(type):
        """Convert power into immersifiers (40 power each, 8 max held).

        NOTE: parameter name `type` shadows the builtin (kept as-is)."""
        if type == "immersifier":
            log.hr("准备合成沉浸器", 2)
            screen.change_to("guide3")
            # Crop of the "<count>/8" immersifier readout.
            immersifier_crop = (1623.0 / 1920, 40.0 / 1080, 162.0 / 1920, 52.0 / 1080)
            text = auto.get_single_line_text(crop=immersifier_crop, blacklist=[
                '+', '米'], max_retries=3)
            if "/8" not in text:
                log.error("沉浸器数量识别失败")
                return
            immersifier_count = int(text.split("/")[0])
            log.info(f"🟣沉浸器: {immersifier_count}/8")
            if immersifier_count >= 8:
                log.info("沉浸器已满")
                return
            screen.change_to("guide3")
            power = Power.get()
            # 40 power per immersifier, capped by remaining capacity.
            count = min(power // 40, 8 - immersifier_count)
            if count <= 0:
                log.info("体力不足")
                return
            log.hr(f"准备合成 {count} 个沉浸器", 2)
            screen.change_to("guide3")
            if auto.click_element("./assets/images/share/power/immersifier/immersifier.png", "image", 0.9, crop=immersifier_crop):
                time.sleep(1)
                # Dialog starts at quantity 1; click "+" count-1 times.
                for i in range(count - 1):
                    auto.click_element(
                        "./assets/images/share/power/trailblaze_power/plus.png", "image", 0.9)
                    time.sleep(0.5)
                if auto.click_element("./assets/images/zh_CN/base/confirm.png", "image", 0.9, max_retries=10):
                    time.sleep(1)
                    auto.press_key("esc")
class Universe:
    """Automation wrapper around the third-party Auto_Simulated_Universe tool:
    download/update it, install its dependencies, run it, and collect rewards.

    NOTE(review): methods are written without ``self``/``@staticmethod`` and
    are always invoked as ``Universe.<method>()`` (namespace-style class).
    """

    def update():
        """Download the latest Auto_Simulated_Universe build into
        cfg.universe_path ("exe": newest GitHub release asset; "source":
        repository zip, clearing the dependency-installed flag)."""
        from module.update.update_handler import UpdateHandler
        from tasks.base.fastest_mirror import FastestMirror
        if cfg.universe_operation_mode == "exe":
            import requests
            import json
            response = requests.get(FastestMirror.get_github_api_mirror("moesnow", "Auto_Simulated_Universe"), timeout=10, headers=cfg.useragent)
            if response.status_code == 200:
                data = json.loads(response.text)
                url = None
                # Take the first release asset only.
                for asset in data["assets"]:
                    url = FastestMirror.get_github_mirror(asset["browser_download_url"])
                    break
                if url is None:
                    log.error("没有找到可用更新,请稍后再试")
                    input("按回车键关闭窗口. . .")
                    sys.exit(0)
                update_handler = UpdateHandler(url, cfg.universe_path, "Auto_Simulated_Universe")
                update_handler.run()
        elif cfg.universe_operation_mode == "source":
            cfg.set_value("universe_requirements", False)
            url = FastestMirror.get_github_mirror("https://github.com/CHNZYX/Auto_Simulated_Universe/archive/main.zip")
            update_handler = UpdateHandler(url, cfg.universe_path, "Auto_Simulated_Universe-main")
            update_handler.run()

    def check_path():
        """Verify the expected entry file exists for the current mode; trigger
        a (re-)download via Universe.update() when it is missing."""
        status = False
        if cfg.universe_operation_mode == "exe":
            if not os.path.exists(os.path.join(cfg.universe_path, "states.exe")):
                status = True
        elif cfg.universe_operation_mode == "source":
            if not os.path.exists(os.path.join(cfg.universe_path, "states.py")):
                status = True
        if status:
            log.warning(f"模拟宇宙路径不存在: {cfg.universe_path}")
            Universe.update()

    def check_requirements():
        """Install the tool's pip requirements once (source mode) and record
        success in the config."""
        if not cfg.universe_requirements:
            log.info("开始安装依赖")
            from tasks.base.fastest_mirror import FastestMirror
            subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "pip", "--upgrade"])
            # NOTE(review): with check=True a pip failure raises instead of
            # looping, and success returns a truthy CompletedProcess, so this
            # loop body is effectively unreachable -- confirm intent.
            while not subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "-r", "requirements.txt"], check=True, cwd=cfg.universe_path):
                log.error("依赖安装失败")
                input("按回车键重试. . .")
            log.info("依赖安装成功")
            cfg.set_value("universe_requirements", True)

    def before_start():
        """Make sure the tool is downloaded and (in source mode) its Python
        environment and dependencies are ready. Always returns True."""
        Universe.check_path()
        if cfg.universe_operation_mode == "source":
            PythonChecker.run()
            Universe.check_requirements()
        return True

    def start(get_reward=False, nums=cfg.universe_count, save=True):
        # NOTE(review): the `nums` default is evaluated once, when this class
        # body executes -- later changes to cfg.universe_count will not be
        # picked up by callers relying on the default.
        """Run a full Simulated Universe session.

        Args:
            get_reward: collect weekly reward points afterwards.
            nums: number of runs to request from the tool (0/None = default).
            save: persist the completion timestamp on success.

        Returns True on success, False otherwise.
        """
        log.hr("准备模拟宇宙", 0)
        game = StarRailController(cfg.game_path, cfg.game_process_name, cfg.game_title_name, 'UnityWndClass', log)
        game.check_resolution(1920, 1080)
        if Universe.before_start():
            screen.change_to('universe_main')
            # Wait out the possible Monday reset popup.
            time.sleep(2)
            # Enter Herta's office.
            screen.change_to('main')
            if cfg.universe_operation_mode == "exe":
                log.info("开始校准")
                if subprocess_with_timeout([os.path.join(cfg.universe_path, "align_angle.exe")], cfg.universe_timeout * 3600, cfg.universe_path):
                    screen.change_to('universe_main')
                    log.info("开始模拟宇宙")
                    command = [os.path.join(cfg.universe_path, "states.exe")]
                    if cfg.universe_bonus_enable:
                        command.append("--bonus=1")
                    if nums:
                        command.append(f"--nums={nums}")
                    if subprocess_with_timeout(command, cfg.universe_timeout * 3600, cfg.universe_path):
                        if save:
                            cfg.save_timestamp("universe_timestamp")
                        if get_reward:
                            Universe.get_reward()
                        else:
                            Base.send_notification_with_screenshot("🎉模拟宇宙已完成🎉")
                        # Optionally salvage 4-star relic sets earned in bonus mode.
                        if cfg.universe_bonus_enable and cfg.break_down_level_four_relicset:
                            Relicset.run()
                        return True
                    else:
                        log.error("模拟宇宙失败")
                else:
                    log.error("校准失败")
            elif cfg.universe_operation_mode == "source":
                log.info("开始校准")
                if subprocess_with_timeout([cfg.python_exe_path, "align_angle.py"], 60, cfg.universe_path, cfg.env):
                    screen.change_to('universe_main')
                    log.info("开始模拟宇宙")
                    command = [cfg.python_exe_path, "states.py"]
                    if cfg.universe_bonus_enable:
                        command.append("--bonus=1")
                    if nums:
                        command.append(f"--nums={nums}")
                    if subprocess_with_timeout(command, cfg.universe_timeout * 3600, cfg.universe_path, cfg.env):
                        if save:
                            cfg.save_timestamp("universe_timestamp")
                        if get_reward:
                            Universe.get_reward()
                        else:
                            Base.send_notification_with_screenshot("🎉模拟宇宙已完成🎉")
                        return True
                    else:
                        log.error("模拟宇宙失败")
                else:
                    log.error("校准失败")
        Base.send_notification_with_screenshot("⚠️模拟宇宙未完成⚠️")
        return False

    def get_reward():
        """Collect the weekly Simulated Universe points reward in-game."""
        log.info("开始领取奖励")
        screen.change_to('universe_main')
        time.sleep(1)
        # Red exclamation badge marks unclaimed rewards (bottom-left area).
        if auto.click_element("./assets/images/share/base/RedExclamationMark.png", "image", 0.9, crop=(0 / 1920, 877.0 / 1080, 422.0 / 1920, 202.0 / 1080)):
            if auto.click_element("./assets/images/zh_CN/universe/one_key_receive.png", "image", 0.9, max_retries=10):
                if auto.find_element("./assets/images/zh_CN/base/click_close.png", "image", 0.8, max_retries=10):
                    Base.send_notification_with_screenshot("🎉模拟宇宙奖励已领取🎉")
                    auto.click_element("./assets/images/zh_CN/base/click_close.png", "image", 0.8, max_retries=10)

    def gui():
        """Launch the tool's GUI; returns True when launched."""
        if Universe.before_start():
            if subprocess.run(["start", "gui.exe"], shell=True, check=True, cwd=cfg.universe_path, env=cfg.env):
                return True
        return False

    def run_daily():
        """Daily single-run mode -- currently disabled, always False."""
        return False
        # if config.daily_universe_enable:
        #     return Universe.start(get_reward=False, nums=1, save=False)

    def reset_config():
        """Delete the tool's info.yml so it regenerates with defaults."""
        config_path = os.path.join(cfg.universe_path, "info.yml")
        try:
            os.remove(config_path)
            log.info(f"重置配置文件完成:{config_path}")
        except Exception as e:
            log.warning(f"重置配置文件失败:{e}")
def run_sub_task(action):
    """Start the game, run the sub-task registered for *action* (if any),
    then stop the game without shutdown confirmation."""
    dispatch = {
        "daily": lambda: (Daily.run(), reward.start()),
        "power": Power.run,
        "fight": Fight.start,
        "universe": Universe.start,
        "forgottenhall": lambda: challenge.start("memoryofchaos"),
        "purefiction": lambda: challenge.start("purefiction"),
    }
    game.start()
    handler = dispatch.get(action)
    if handler is not None:
        handler()
    game.stop(False)
17,267 |
class Fight:
    """Automation wrapper around the third-party Fhoe-Rail world-farming tool:
    download/update it, install its dependencies, and drive a full run.

    NOTE(review): methods are written without ``self``/``@staticmethod`` and
    are always invoked as ``Fight.<method>()`` (namespace-style class).
    """

    def update():
        """Download the latest Fhoe-Rail build into cfg.fight_path ("exe":
        newest GitHub release asset; "source": repository zip, clearing the
        dependency-installed flag)."""
        from module.update.update_handler import UpdateHandler
        from tasks.base.fastest_mirror import FastestMirror
        if cfg.fight_operation_mode == "exe":
            import requests
            import json
            response = requests.get(FastestMirror.get_github_api_mirror("linruowuyin", "Fhoe-Rail"), timeout=10, headers=cfg.useragent)
            if response.status_code == 200:
                data = json.loads(response.text)
                url = None
                # Take the first release asset only.
                for asset in data["assets"]:
                    url = FastestMirror.get_github_mirror(asset["browser_download_url"])
                    break
                if url is None:
                    log.error("没有找到可用更新,请稍后再试")
                    input("按回车键关闭窗口. . .")
                    sys.exit(0)
                # Extra argument: the "map" sub-folder handled by the updater.
                update_handler = UpdateHandler(url, cfg.fight_path, "Fhoe-Rail", os.path.join(cfg.fight_path, "map"))
                update_handler.run()
        elif cfg.fight_operation_mode == "source":
            cfg.set_value("fight_requirements", False)
            url = FastestMirror.get_github_mirror(
                "https://github.com/linruowuyin/Fhoe-Rail/archive/master.zip")
            update_handler = UpdateHandler(url, cfg.fight_path, "Fhoe-Rail-master")
            update_handler.run()

    def check_path():
        """Verify the expected entry files exist for the current mode; trigger
        a (re-)download via Fight.update() when anything is missing."""
        status = False
        if cfg.fight_operation_mode == "exe":
            if not os.path.exists(os.path.join(cfg.fight_path, "Fhoe-Rail.exe")):
                status = True
        elif cfg.fight_operation_mode == "source":
            if not os.path.exists(os.path.join(cfg.fight_path, "Honkai_Star_Rail.py")):
                status = True
            if not os.path.exists(os.path.join(cfg.fight_path, "点这里啦.exe")):
                status = True
        if status:
            log.warning(f"锄大地路径不存在: {cfg.fight_path}")
            Fight.update()

    def check_requirements():
        """Install Fhoe-Rail's pip requirements once (source mode) and record
        success in the config."""
        if not cfg.fight_requirements:
            log.info("开始安装依赖")
            from tasks.base.fastest_mirror import FastestMirror
            subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i",
                            FastestMirror.get_pypi_mirror(), "pip", "--upgrade"])
            # NOTE(review): with check=True a pip failure raises instead of
            # looping, and success returns a truthy CompletedProcess, so this
            # loop body is effectively unreachable -- confirm intent.
            while not subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "-r", "requirements.txt"], check=True, cwd=cfg.fight_path):
                log.error("依赖安装失败")
                input("按回车键重试. . .")
            log.info("依赖安装成功")
            cfg.set_value("fight_requirements", True)

    def before_start():
        """Make sure the tool is downloaded and (in source mode) its Python
        environment and dependencies are ready. Always returns True."""
        Fight.check_path()
        if cfg.fight_operation_mode == "source":
            PythonChecker.run()
            Fight.check_requirements()
        return True

    def start():
        """Run a full Fhoe-Rail farming session; returns True on success."""
        log.hr("准备锄大地", 0)
        game = StarRailController(cfg.game_path, cfg.game_process_name, cfg.game_title_name, 'UnityWndClass', log)
        game.check_resolution(1920, 1080)
        if Fight.before_start():
            # Switch to the configured team first, if enabled.
            if cfg.fight_team_enable:
                Team.change_to(cfg.fight_team_number)
            log.info("开始锄大地")
            screen.change_to('main')
            status = False
            if cfg.fight_operation_mode == "exe":
                if subprocess_with_timeout([os.path.join(cfg.fight_path, "Fhoe-Rail.exe")], cfg.fight_timeout * 3600, cfg.fight_path):
                    status = True
            elif cfg.fight_operation_mode == "source":
                if subprocess_with_timeout([cfg.python_exe_path, "Honkai_Star_Rail.py"], cfg.fight_timeout * 3600, cfg.fight_path, cfg.env):
                    status = True
            if status:
                cfg.save_timestamp("fight_timestamp")
                Base.send_notification_with_screenshot("🎉锄大地已完成🎉")
                return True
        log.error("锄大地失败")
        Base.send_notification_with_screenshot("⚠️锄大地未完成⚠️")
        return False

    def gui():
        """Launch Fhoe-Rail's interactive GUI; returns True when launched."""
        if Fight.before_start():
            if cfg.fight_operation_mode == "exe":
                if subprocess.run(["start", "Fhoe-Rail.exe", "--debug"], shell=True, check=True, cwd=cfg.fight_path):
                    return True
            elif cfg.fight_operation_mode == "source":
                if subprocess.run(["start", "点这里啦.exe"], shell=True, check=True, cwd=cfg.fight_path, env=cfg.env):
                    return True
        return False

    def reset_config():
        """Delete the tool's config.json so it regenerates with defaults."""
        config_path = os.path.join(cfg.fight_path, "config.json")
        try:
            os.remove(config_path)
            log.info(f"重置配置文件完成:{config_path}")
        except Exception as e:
            log.warning(f"重置配置文件失败:{e}")
class Universe:
def update():
from module.update.update_handler import UpdateHandler
from tasks.base.fastest_mirror import FastestMirror
if cfg.universe_operation_mode == "exe":
import requests
import json
response = requests.get(FastestMirror.get_github_api_mirror("moesnow", "Auto_Simulated_Universe"), timeout=10, headers=cfg.useragent)
if response.status_code == 200:
data = json.loads(response.text)
url = None
for asset in data["assets"]:
url = FastestMirror.get_github_mirror(asset["browser_download_url"])
break
if url is None:
log.error("没有找到可用更新,请稍后再试")
input("按回车键关闭窗口. . .")
sys.exit(0)
update_handler = UpdateHandler(url, cfg.universe_path, "Auto_Simulated_Universe")
update_handler.run()
elif cfg.universe_operation_mode == "source":
cfg.set_value("universe_requirements", False)
url = FastestMirror.get_github_mirror("https://github.com/CHNZYX/Auto_Simulated_Universe/archive/main.zip")
update_handler = UpdateHandler(url, cfg.universe_path, "Auto_Simulated_Universe-main")
update_handler.run()
def check_path():
status = False
if cfg.universe_operation_mode == "exe":
if not os.path.exists(os.path.join(cfg.universe_path, "states.exe")):
status = True
elif cfg.universe_operation_mode == "source":
if not os.path.exists(os.path.join(cfg.universe_path, "states.py")):
status = True
if status:
log.warning(f"模拟宇宙路径不存在: {cfg.universe_path}")
Universe.update()
def check_requirements():
if not cfg.universe_requirements:
log.info("开始安装依赖")
from tasks.base.fastest_mirror import FastestMirror
subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "pip", "--upgrade"])
while not subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "-r", "requirements.txt"], check=True, cwd=cfg.universe_path):
log.error("依赖安装失败")
input("按回车键重试. . .")
log.info("依赖安装成功")
cfg.set_value("universe_requirements", True)
def before_start():
Universe.check_path()
if cfg.universe_operation_mode == "source":
PythonChecker.run()
Universe.check_requirements()
return True
def start(get_reward=False, nums=cfg.universe_count, save=True):
log.hr("准备模拟宇宙", 0)
game = StarRailController(cfg.game_path, cfg.game_process_name, cfg.game_title_name, 'UnityWndClass', log)
game.check_resolution(1920, 1080)
if Universe.before_start():
screen.change_to('universe_main')
# 等待可能的周一弹窗
time.sleep(2)
# 进入黑塔办公室
screen.change_to('main')
if cfg.universe_operation_mode == "exe":
log.info("开始校准")
if subprocess_with_timeout([os.path.join(cfg.universe_path, "align_angle.exe")], cfg.universe_timeout * 3600, cfg.universe_path):
screen.change_to('universe_main')
log.info("开始模拟宇宙")
command = [os.path.join(cfg.universe_path, "states.exe")]
if cfg.universe_bonus_enable:
command.append("--bonus=1")
if nums:
command.append(f"--nums={nums}")
if subprocess_with_timeout(command, cfg.universe_timeout * 3600, cfg.universe_path):
if save:
cfg.save_timestamp("universe_timestamp")
if get_reward:
Universe.get_reward()
else:
Base.send_notification_with_screenshot("🎉模拟宇宙已完成🎉")
if cfg.universe_bonus_enable and cfg.break_down_level_four_relicset:
Relicset.run()
return True
else:
log.error("模拟宇宙失败")
else:
log.error("校准失败")
elif cfg.universe_operation_mode == "source":
log.info("开始校准")
if subprocess_with_timeout([cfg.python_exe_path, "align_angle.py"], 60, cfg.universe_path, cfg.env):
screen.change_to('universe_main')
log.info("开始模拟宇宙")
command = [cfg.python_exe_path, "states.py"]
if cfg.universe_bonus_enable:
command.append("--bonus=1")
if nums:
command.append(f"--nums={nums}")
if subprocess_with_timeout(command, cfg.universe_timeout * 3600, cfg.universe_path, cfg.env):
if save:
cfg.save_timestamp("universe_timestamp")
if get_reward:
Universe.get_reward()
else:
Base.send_notification_with_screenshot("🎉模拟宇宙已完成🎉")
return True
else:
log.error("模拟宇宙失败")
else:
log.error("校准失败")
Base.send_notification_with_screenshot("⚠️模拟宇宙未完成⚠️")
return False
def get_reward():
log.info("开始领取奖励")
screen.change_to('universe_main')
time.sleep(1)
if auto.click_element("./assets/images/share/base/RedExclamationMark.png", "image", 0.9, crop=(0 / 1920, 877.0 / 1080, 422.0 / 1920, 202.0 / 1080)):
if auto.click_element("./assets/images/zh_CN/universe/one_key_receive.png", "image", 0.9, max_retries=10):
if auto.find_element("./assets/images/zh_CN/base/click_close.png", "image", 0.8, max_retries=10):
Base.send_notification_with_screenshot("🎉模拟宇宙奖励已领取🎉")
auto.click_element("./assets/images/zh_CN/base/click_close.png", "image", 0.8, max_retries=10)
def gui():
if Universe.before_start():
if subprocess.run(["start", "gui.exe"], shell=True, check=True, cwd=cfg.universe_path, env=cfg.env):
return True
return False
def run_daily():
return False
# if config.daily_universe_enable:
# return Universe.start(get_reward=False, nums=1, save=False)
def reset_config():
config_path = os.path.join(cfg.universe_path, "info.yml")
try:
os.remove(config_path)
log.info(f"重置配置文件完成:{config_path}")
except Exception as e:
log.warning(f"重置配置文件失败:{e}")
def run_sub_task_gui(action):
    """Launch the GUI helper mapped to *action*; on launch failure, wait for
    Enter and terminate the process."""
    launchers = {
        "universe_gui": Universe.gui,
        "fight_gui": Fight.gui,
    }
    launcher = launchers.get(action)
    if launcher is None:
        return
    if not launcher():
        input("按回车键关闭窗口. . .")
        sys.exit(0)
17,268 |
class Fight:
def update():
from module.update.update_handler import UpdateHandler
from tasks.base.fastest_mirror import FastestMirror
if cfg.fight_operation_mode == "exe":
import requests
import json
response = requests.get(FastestMirror.get_github_api_mirror("linruowuyin", "Fhoe-Rail"), timeout=10, headers=cfg.useragent)
if response.status_code == 200:
data = json.loads(response.text)
url = None
for asset in data["assets"]:
url = FastestMirror.get_github_mirror(asset["browser_download_url"])
break
if url is None:
log.error("没有找到可用更新,请稍后再试")
input("按回车键关闭窗口. . .")
sys.exit(0)
update_handler = UpdateHandler(url, cfg.fight_path, "Fhoe-Rail", os.path.join(cfg.fight_path, "map"))
update_handler.run()
elif cfg.fight_operation_mode == "source":
cfg.set_value("fight_requirements", False)
url = FastestMirror.get_github_mirror(
"https://github.com/linruowuyin/Fhoe-Rail/archive/master.zip")
update_handler = UpdateHandler(url, cfg.fight_path, "Fhoe-Rail-master")
update_handler.run()
def check_path():
status = False
if cfg.fight_operation_mode == "exe":
if not os.path.exists(os.path.join(cfg.fight_path, "Fhoe-Rail.exe")):
status = True
elif cfg.fight_operation_mode == "source":
if not os.path.exists(os.path.join(cfg.fight_path, "Honkai_Star_Rail.py")):
status = True
if not os.path.exists(os.path.join(cfg.fight_path, "点这里啦.exe")):
status = True
if status:
log.warning(f"锄大地路径不存在: {cfg.fight_path}")
Fight.update()
def check_requirements():
if not cfg.fight_requirements:
log.info("开始安装依赖")
from tasks.base.fastest_mirror import FastestMirror
subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i",
FastestMirror.get_pypi_mirror(), "pip", "--upgrade"])
while not subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "-r", "requirements.txt"], check=True, cwd=cfg.fight_path):
log.error("依赖安装失败")
input("按回车键重试. . .")
log.info("依赖安装成功")
cfg.set_value("fight_requirements", True)
def before_start():
Fight.check_path()
if cfg.fight_operation_mode == "source":
PythonChecker.run()
Fight.check_requirements()
return True
def start():
log.hr("准备锄大地", 0)
game = StarRailController(cfg.game_path, cfg.game_process_name, cfg.game_title_name, 'UnityWndClass', log)
game.check_resolution(1920, 1080)
if Fight.before_start():
# 切换队伍
if cfg.fight_team_enable:
Team.change_to(cfg.fight_team_number)
log.info("开始锄大地")
screen.change_to('main')
status = False
if cfg.fight_operation_mode == "exe":
if subprocess_with_timeout([os.path.join(cfg.fight_path, "Fhoe-Rail.exe")], cfg.fight_timeout * 3600, cfg.fight_path):
status = True
elif cfg.fight_operation_mode == "source":
if subprocess_with_timeout([cfg.python_exe_path, "Honkai_Star_Rail.py"], cfg.fight_timeout * 3600, cfg.fight_path, cfg.env):
status = True
if status:
cfg.save_timestamp("fight_timestamp")
Base.send_notification_with_screenshot("🎉锄大地已完成🎉")
return True
log.error("锄大地失败")
Base.send_notification_with_screenshot("⚠️锄大地未完成⚠️")
return False
def gui():
if Fight.before_start():
if cfg.fight_operation_mode == "exe":
if subprocess.run(["start", "Fhoe-Rail.exe", "--debug"], shell=True, check=True, cwd=cfg.fight_path):
return True
elif cfg.fight_operation_mode == "source":
if subprocess.run(["start", "点这里啦.exe"], shell=True, check=True, cwd=cfg.fight_path, env=cfg.env):
return True
return False
def reset_config():
config_path = os.path.join(cfg.fight_path, "config.json")
try:
os.remove(config_path)
log.info(f"重置配置文件完成:{config_path}")
except Exception as e:
log.warning(f"重置配置文件失败:{e}")
class Universe:
def update():
from module.update.update_handler import UpdateHandler
from tasks.base.fastest_mirror import FastestMirror
if cfg.universe_operation_mode == "exe":
import requests
import json
response = requests.get(FastestMirror.get_github_api_mirror("moesnow", "Auto_Simulated_Universe"), timeout=10, headers=cfg.useragent)
if response.status_code == 200:
data = json.loads(response.text)
url = None
for asset in data["assets"]:
url = FastestMirror.get_github_mirror(asset["browser_download_url"])
break
if url is None:
log.error("没有找到可用更新,请稍后再试")
input("按回车键关闭窗口. . .")
sys.exit(0)
update_handler = UpdateHandler(url, cfg.universe_path, "Auto_Simulated_Universe")
update_handler.run()
elif cfg.universe_operation_mode == "source":
cfg.set_value("universe_requirements", False)
url = FastestMirror.get_github_mirror("https://github.com/CHNZYX/Auto_Simulated_Universe/archive/main.zip")
update_handler = UpdateHandler(url, cfg.universe_path, "Auto_Simulated_Universe-main")
update_handler.run()
def check_path():
status = False
if cfg.universe_operation_mode == "exe":
if not os.path.exists(os.path.join(cfg.universe_path, "states.exe")):
status = True
elif cfg.universe_operation_mode == "source":
if not os.path.exists(os.path.join(cfg.universe_path, "states.py")):
status = True
if status:
log.warning(f"模拟宇宙路径不存在: {cfg.universe_path}")
Universe.update()
def check_requirements():
if not cfg.universe_requirements:
log.info("开始安装依赖")
from tasks.base.fastest_mirror import FastestMirror
subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "pip", "--upgrade"])
while not subprocess.run([cfg.python_exe_path, "-m", "pip", "install", "-i", FastestMirror.get_pypi_mirror(), "-r", "requirements.txt"], check=True, cwd=cfg.universe_path):
log.error("依赖安装失败")
input("按回车键重试. . .")
log.info("依赖安装成功")
cfg.set_value("universe_requirements", True)
def before_start():
Universe.check_path()
if cfg.universe_operation_mode == "source":
PythonChecker.run()
Universe.check_requirements()
return True
def start(get_reward=False, nums=cfg.universe_count, save=True):
log.hr("准备模拟宇宙", 0)
game = StarRailController(cfg.game_path, cfg.game_process_name, cfg.game_title_name, 'UnityWndClass', log)
game.check_resolution(1920, 1080)
if Universe.before_start():
screen.change_to('universe_main')
# 等待可能的周一弹窗
time.sleep(2)
# 进入黑塔办公室
screen.change_to('main')
if cfg.universe_operation_mode == "exe":
log.info("开始校准")
if subprocess_with_timeout([os.path.join(cfg.universe_path, "align_angle.exe")], cfg.universe_timeout * 3600, cfg.universe_path):
screen.change_to('universe_main')
log.info("开始模拟宇宙")
command = [os.path.join(cfg.universe_path, "states.exe")]
if cfg.universe_bonus_enable:
command.append("--bonus=1")
if nums:
command.append(f"--nums={nums}")
if subprocess_with_timeout(command, cfg.universe_timeout * 3600, cfg.universe_path):
if save:
cfg.save_timestamp("universe_timestamp")
if get_reward:
Universe.get_reward()
else:
Base.send_notification_with_screenshot("🎉模拟宇宙已完成🎉")
if cfg.universe_bonus_enable and cfg.break_down_level_four_relicset:
Relicset.run()
return True
else:
log.error("模拟宇宙失败")
else:
log.error("校准失败")
elif cfg.universe_operation_mode == "source":
log.info("开始校准")
if subprocess_with_timeout([cfg.python_exe_path, "align_angle.py"], 60, cfg.universe_path, cfg.env):
screen.change_to('universe_main')
log.info("开始模拟宇宙")
command = [cfg.python_exe_path, "states.py"]
if cfg.universe_bonus_enable:
command.append("--bonus=1")
if nums:
command.append(f"--nums={nums}")
if subprocess_with_timeout(command, cfg.universe_timeout * 3600, cfg.universe_path, cfg.env):
if save:
cfg.save_timestamp("universe_timestamp")
if get_reward:
Universe.get_reward()
else:
Base.send_notification_with_screenshot("🎉模拟宇宙已完成🎉")
return True
else:
log.error("模拟宇宙失败")
else:
log.error("校准失败")
Base.send_notification_with_screenshot("⚠️模拟宇宙未完成⚠️")
return False
def get_reward():
log.info("开始领取奖励")
screen.change_to('universe_main')
time.sleep(1)
if auto.click_element("./assets/images/share/base/RedExclamationMark.png", "image", 0.9, crop=(0 / 1920, 877.0 / 1080, 422.0 / 1920, 202.0 / 1080)):
if auto.click_element("./assets/images/zh_CN/universe/one_key_receive.png", "image", 0.9, max_retries=10):
if auto.find_element("./assets/images/zh_CN/base/click_close.png", "image", 0.8, max_retries=10):
Base.send_notification_with_screenshot("🎉模拟宇宙奖励已领取🎉")
auto.click_element("./assets/images/zh_CN/base/click_close.png", "image", 0.8, max_retries=10)
def gui():
if Universe.before_start():
if subprocess.run(["start", "gui.exe"], shell=True, check=True, cwd=cfg.universe_path, env=cfg.env):
return True
return False
def run_daily():
return False
# if config.daily_universe_enable:
# return Universe.start(get_reward=False, nums=1, save=False)
def reset_config():
config_path = os.path.join(cfg.universe_path, "info.yml")
try:
os.remove(config_path)
log.info(f"重置配置文件完成:{config_path}")
except Exception as e:
log.warning(f"重置配置文件失败:{e}")
def run_sub_task_update(action):
    """Run the updater mapped to *action* (if registered), then wait for
    Enter and terminate the process."""
    updaters = {
        "universe_update": Universe.update,
        "fight_update": Fight.update,
    }
    updater = updaters.get(action)
    if updater is not None:
        updater()
    input("按回车键关闭窗口. . .")
    sys.exit(0)
17,269 |
class Fight:
def update():
def check_path():
def check_requirements():
def before_start():
def start():
def gui():
def reset_config():
class Universe:
def update():
def check_path():
def check_requirements():
def before_start():
def start(get_reward=False, nums=cfg.universe_count, save=True):
def get_reward():
def gui():
def run_daily():
def reset_config():
def run_sub_task_reset(action):
    """Reset the config of the tool mapped to *action* (if registered), then
    wait for Enter and terminate the process."""
    resetters = {
        "universe_reset": Universe.reset_config,
        "fight_reset": Fight.reset_config,
    }
    resetter = resetters.get(action)
    if resetter is not None:
        resetter()
    input("按回车键关闭窗口. . .")
    sys.exit(0)
17,270 |
# Module-level notifier instance. The original line ended with a stray ":"
# (`... log):`), which is a syntax error -- removed here.
notif = Notification("三月七小助手|・ω・)", log)


def run_notify_action():
    """Send a test notification, then wait for Enter and exit the process."""
    notif.notify("这是一条测试消息", "./assets/app/images/March7th.jpg")
    input("按回车键关闭窗口. . .")
    sys.exit(0)
17,271 |
The provided code snippet includes necessary dependencies for implementing the `exit_handler` function. Write a Python function `def exit_handler()` to solve the following problem:
注册程序退出时的处理函数,用于清理OCR资源.
Here is the function:
def exit_handler():
    """Process-exit handler: releases the OCR engine's resources on shutdown.

    Intended to be registered (e.g. via atexit) so the OCR subprocess does
    not outlive the application.
    """
    ocr.exit_ocr()
17,272 | import os
import socket t subprocess import loads as jsonLoads, dumps as jsonDumps
from sys import platform as sysPlatform se64 import b64encode lass PPOCR_pipe:
"""调用OCR(管道模式)"""
class PPOCR_socket(PPOCR_pipe):
"""调用OCR(套接字模式)"""
None, ipcMode: str = "pipe"):
"""获取识别器API对象。\n
`exePath`: 识别器`PaddleOCR_json.exe`的路径。\n
`argument`: 启动参数,字典`{"键":值}`。参数说明见 https://github.com/hiroi-sora/PaddleOCR-json\n
`ipcMode`: 进程通信模式,可选值为套接字模式`socket` 或 管道模式`pipe`。用法上完全一致。
"""
if ipcMode == "socket":
return PPOCR_socket(exePath, argument)
elif ipcMode == "pipe":
return PPOCR_pipe(exePath, argument)
else:
raise Exception(f'ipcMode可选值为 套接字模式"socket" 或 管道模式"pipe" ,不允许{ipcMode}。')
The provided code snippet includes necessary dependencies for implementing the `GetOcrApi` function. Write a Python function `def GetOcrApi(exePath: str, argument: dict = None, ipcMode: str = "pipe")` to solve the following problem:
获取识别器API对象。\n `exePath`: 识别器`PaddleOCR_json.exe`的路径。\n `argument`: 启动参数,字典`{"键":值}`。参数说明见 https://github.com/hiroi-sora/PaddleOCR-json\n `ipcMode`: 进程通信模式,可选值为套接字模式`socket` 或 管道模式`pipe`。用法上完全一致。
Here is the function:
def GetOcrApi(exePath: str, argument: dict = None, ipcMode: str = "pipe"):
    """Create an OCR recognizer API object.

    Args:
        exePath: path to the `PaddleOCR_json.exe` recognizer binary.
        argument: startup parameters as a `{"key": value}` dict; see
            https://github.com/hiroi-sora/PaddleOCR-json for details.
        ipcMode: inter-process communication mode -- socket mode `"socket"`
            or pipe mode `"pipe"`; both expose the same interface.

    Raises:
        ValueError: for an unknown *ipcMode*. (ValueError subclasses
            Exception, so callers catching the old bare Exception still work.)
    """
    if ipcMode == "socket":
        return PPOCR_socket(exePath, argument)
    if ipcMode == "pipe":
        return PPOCR_pipe(exePath, argument)
    # Invalid mode: raise a specific exception type instead of bare Exception.
    raise ValueError(f'ipcMode可选值为 套接字模式"socket" 或 管道模式"pipe" ,不允许{ipcMode}。')
17,273 | import concurrent.futures
import json
import os
import shutil
import subprocess
import sys
import time
from packaging.version import parse
from tqdm import tqdm
import requests
import psutil
from urllib.request import urlopen
from urllib.error import URLError
from utils.color import red, green
from module.logger.logger import Logger
class Updater:
"""应用程序更新器,负责检查、下载、解压和安装最新版本的应用程序。"""
    def __init__(self, logger: Logger, download_url=None):
        """Prepare the updater: paths, mirrors, and the asset download URL.

        Args:
            logger: project logger used for all progress output.
            download_url: optional direct asset URL; when None the latest
                release is resolved and the method blocks on input() for
                user confirmation before updating.
        """
        self.logger = logger
        # Running instances with these names must be closed before install.
        self.process_names = ["March7th Assistant.exe", "March7th Launcher.exe"]
        # Primary GitHub API endpoint plus a mirror; the fastest is probed.
        self.api_urls = [
            "https://api.github.com/repos/moesnow/March7thAssistant/releases/latest",
            "https://github.kotori.top/https://api.github.com/repos/moesnow/March7thAssistant/releases/latest",
        ]
        self.temp_path = os.path.abspath("./temp")
        os.makedirs(self.temp_path, exist_ok=True)
        self.download_url = download_url
        self.cover_folder_path = os.path.abspath("./")
        self.exe_path = os.path.abspath("./assets/binary/7za.exe")  # 7-Zip extractor
        self.aria2_path = os.path.abspath("./assets/binary/aria2c.exe")  # optional downloader
        self.delete_folder_path = os.path.join("./3rdparty/Fhoe-Rail", "map")
        self.logger.hr("获取下载链接", 0)
        if download_url is None:
            self.download_url = self.get_download_url()
            self.logger.info(f"下载链接: {green(self.download_url)}")
            self.logger.hr("完成", 2)
            # Interactive: wait for user confirmation before updating.
            input("按回车键开始更新")
        else:
            self.logger.info(f"下载链接: {green(self.download_url)}")
            self.logger.hr("完成", 2)
        self.download_file_path = os.path.join(self.temp_path, os.path.basename(self.download_url))
        self.extract_folder_path = os.path.join(self.temp_path, os.path.basename(self.download_url).rsplit(".", 1)[0])
def get_download_url(self):
"""检测更新并获取下载URL。"""
self.logger.info("开始检测更新")
fastest_mirror = self.find_fastest_mirror(self.api_urls)
try:
with urlopen(fastest_mirror, timeout=10) as response:
if response.getcode() == 200:
data = json.loads(response.read().decode('utf-8'))
return self.process_release_data(data)
except URLError as e:
self.logger.error(f"检测更新失败: {red(e)}")
input("按回车键重试...")
return self.get_download_url()
    def process_release_data(self, data):
        """Extract the version and asset URL from a GitHub release payload.

        Picks the first asset whose URL does not contain "full" (i.e. the
        incremental package), logs the version comparison, and returns the
        fastest mirror for that asset.

        Args:
            data: parsed JSON dict of a GitHub "releases/latest" response.

        Raises:
            Exception: when no suitable asset is present.
        """
        version = data["tag_name"]
        download_url = None
        for asset in data["assets"]:
            if "full" not in asset["browser_download_url"]:
                download_url = asset["browser_download_url"]
                break
        if download_url is None:
            raise Exception("没有找到合适的下载URL")
        self.compare_versions(version)
        return self.find_fastest_mirror([download_url, f"https://github.kotori.top/{download_url}"])
def compare_versions(self, version):
    """Log the local version against the remote *version* tag.

    Any failure (missing version file, parse error) is logged and only
    the remote version is reported.
    """
    try:
        with open("./assets/config/version.txt", 'r', encoding='utf-8') as file:
            current_version = file.read().strip()
        remote_is_newer = parse(version.lstrip('v')) > parse(current_version.lstrip('v'))
        if remote_is_newer:
            self.logger.info(f"发现新版本: {current_version} ——> {version}")
        else:
            self.logger.info(f"本地版本: {current_version}")
            self.logger.info(f"远程版本: {version}")
            self.logger.info(f"当前已是最新版本")
    except Exception as e:
        self.logger.info(f"本地版本获取失败: {e}")
        self.logger.info(f"最新版本: {version}")
def find_fastest_mirror(self, mirror_urls, timeout=5):
    """Probe each mirror concurrently and return the quickest responder,
    falling back to the first entry when none responds.

    Bug fix: the previous min(...) used key (x[1] is not None, x[1]), which
    ordered failed probes *before* successful ones (so one failure made the
    measured timings irrelevant) and raised TypeError comparing None < None
    whenever two probes failed. Failures are now filtered out before taking
    the minimum by elapsed time.
    """
    def probe(url):
        # Returns (url, elapsed) on HTTP 200, None on any failure.
        try:
            start = time.time()
            response = requests.head(url, timeout=timeout, allow_redirects=True)
            if response.status_code == 200:
                return url, time.time() - start
        except Exception:
            pass
        return None

    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(probe, url) for url in mirror_urls]
        results = [f.result() for f in concurrent.futures.as_completed(futures)]
    timed = [r for r in results if r is not None]
    if not timed:
        return mirror_urls[0]
    return min(timed, key=lambda item: item[1])[0]
def download_with_progress(self):
    """Download the release archive, retrying on keypress until it succeeds.

    Prefers the bundled aria2c (16 connections, resumable); falls back to
    requests + tqdm streaming when aria2c is absent.
    """
    self.logger.hr("下载", 0)
    while True:
        try:
            self.logger.info("开始下载...")
            if os.path.exists(self.aria2_path):
                # aria2c path: multi-connection download into the temp dir.
                command = [self.aria2_path, "--max-connection-per-server=16", "--dir={}".format(os.path.dirname(self.download_file_path)),
                           "--out={}".format(os.path.basename(self.download_file_path)), self.download_url]
                if os.path.exists(self.download_file_path):
                    # A partial file exists: resume instead of restarting.
                    command.insert(2, "--continue=true")
                subprocess.run(command, check=True)
            else:
                # Fallback: HEAD for the size so tqdm can show a total,
                # then stream the body in 1 KiB chunks.
                response = requests.head(self.download_url)
                file_size = int(response.headers.get('Content-Length', 0))
                with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
                    with requests.get(self.download_url, stream=True) as r:
                        with open(self.download_file_path, 'wb') as f:
                            for chunk in r.iter_content(chunk_size=1024):
                                if chunk:
                                    f.write(chunk)
                                    pbar.update(len(chunk))
            self.logger.info(f"下载完成: {green(self.download_file_path)}")
            break
        except Exception as e:
            self.logger.error(f"下载失败: {red(e)}")
            input("按回车键重试. . .")
            # Drop the partial file so the retry starts clean.
            if os.path.exists(self.download_file_path):
                os.remove(self.download_file_path)
    self.logger.hr("完成", 2)
def extract_file(self):
    """Extract the downloaded archive into the temp directory.

    Returns True on success; on failure deletes the (likely corrupt)
    archive and returns False so the caller can re-download.
    """
    self.logger.hr("解压", 0)
    # NOTE(review): both branches return, so this loop runs at most once.
    while True:
        try:
            self.logger.info("开始解压...")
            if os.path.exists(self.exe_path):
                # Bundled 7-Zip: -aoa overwrites existing files without asking.
                subprocess.run([self.exe_path, "x", self.download_file_path, f"-o{self.temp_path}", "-aoa"], check=True)
            else:
                shutil.unpack_archive(self.download_file_path, self.temp_path)
            self.logger.info(f"解压完成: {green(self.extract_folder_path)}")
            self.logger.hr("完成", 2)
            return True
        except Exception as e:
            self.logger.error(f"解压失败: {red(e)}")
            self.logger.hr("完成", 2)
            input("按回车键重新下载. . .")
            # Archive is probably corrupt: delete it so it is re-downloaded.
            if os.path.exists(self.download_file_path):
                os.remove(self.download_file_path)
            return False
def cover_folder(self):
    """Copy the extracted release over the install directory
    (overwrite-install), retrying on keypress until it succeeds."""
    self.logger.hr("覆盖", 0)
    while True:
        try:
            self.logger.info("开始覆盖...")
            # Full packages ship fresh map data; remove the stale folder first.
            if "full" in self.download_url and os.path.exists(self.delete_folder_path):
                shutil.rmtree(self.delete_folder_path)
            shutil.copytree(self.extract_folder_path, self.cover_folder_path, dirs_exist_ok=True)
            self.logger.info(f"覆盖完成: {green(self.cover_folder_path)}")
            break
        except Exception as e:
            self.logger.error(f"覆盖失败: {red(e)}")
            input("按回车键重试. . .")
    self.logger.hr("完成", 2)
def terminate_processes(self):
    """Stop the assistant/launcher processes so their files can be replaced.

    Processes that are already gone, protected, or slow to exit are
    silently skipped.
    """
    self.logger.hr("终止进程", 0)
    self.logger.info("开始终止进程...")
    targets = set(self.process_names)
    for process in psutil.process_iter(attrs=['pid', 'name']):
        if process.info['name'] not in targets:
            continue
        try:
            process.terminate()
            process.wait(10)
        except (psutil.NoSuchProcess, psutil.TimeoutExpired, psutil.AccessDenied):
            pass
    self.logger.info(green("终止进程完成"))
    self.logger.hr("完成", 2)
def cleanup(self):
    """Delete the downloaded archive and the extracted folder from temp.

    Failures are logged, never raised — a leftover temp file is harmless.
    """
    self.logger.hr("清理", 0)
    self.logger.info("开始清理...")
    try:
        os.remove(self.download_file_path)
        self.logger.info(f"清理完成: {green(self.download_file_path)}")
        shutil.rmtree(self.extract_folder_path)
        self.logger.info(f"清理完成: {green(self.extract_folder_path)}")
    except Exception as e:
        self.logger.error(f"清理失败: {e}")
    self.logger.hr("完成", 2)
def run(self):
    """Full update flow: kill processes, download + extract (re-downloading
    until extraction succeeds), overwrite the install, clean up, relaunch."""
    self.terminate_processes()
    while True:
        self.download_with_progress()
        if self.extract_file():
            break
    self.cover_folder()
    self.cleanup()
    input("按回车键退出并打开软件")
    # Relaunch via `cmd /c start` so this updater can exit independently.
    os.system(f'cmd /c start "" "{os.path.abspath("./March7th Launcher.exe")}"')
class Logger(metaclass=SingletonMeta):
    """Singleton log manager: colored console output plus a daily file under ./logs.

    A second, bare-format logger (logger_title) is used for the hr() banners so
    they are emitted without the timestamp/level prefix.
    """

    # Shared line format for the main logger's handlers.
    _FORMAT = '%(asctime)s | %(levelname)s | %(message)s'

    def __init__(self, level="INFO"):
        self._level = level
        self._init_logger()
        self._initialized = True

    def _init_logger(self):
        """Create both the main and the banner (title) logger."""
        self._create_logger()
        self._create_logger_title()

    def _current_datetime(self):
        """Return today's date (YYYY-MM-DD); used as the log file name."""
        return datetime.now().strftime("%Y-%m-%d")

    def _add_handlers(self, logger, console_formatter, file_formatter):
        """Attach a console handler and a daily-file handler to *logger*.

        Factored out of _create_logger/_create_logger_title, which used to
        duplicate this setup verbatim.
        """
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(console_formatter)
        logger.addHandler(console_handler)
        self._ensure_log_directory_exists()
        file_handler = logging.FileHandler(f"./logs/{self._current_datetime()}.log", encoding="utf-8")
        file_handler.setFormatter(file_formatter)
        logger.addHandler(file_handler)

    def _create_logger(self):
        """Main logger: colored on console, color codes filtered in the file."""
        self.logger = logging.getLogger('March7thAssistant')
        self.logger.propagate = False
        self.logger.setLevel(self._level)
        self._add_handlers(self.logger, ColoredFormatter(self._FORMAT), ColorCodeFilter(self._FORMAT))

    def _create_logger_title(self):
        """Banner logger: message-only format on both console and file."""
        self.logger_title = logging.getLogger('March7thAssistant_title')
        self.logger_title.propagate = False
        self.logger_title.setLevel(self._level)
        self._add_handlers(self.logger_title, logging.Formatter('%(message)s'), logging.Formatter('%(message)s'))

    def _ensure_log_directory_exists(self):
        """Create ./logs if it does not exist yet."""
        if not os.path.exists("logs"):
            os.makedirs("logs")

    def info(self, message):
        """Log at INFO level."""
        self.logger.info(message)

    def debug(self, message):
        """Log at DEBUG level."""
        self.logger.debug(message)

    def warning(self, message):
        """Log at WARNING level."""
        self.logger.warning(message)

    def error(self, message):
        """Log at ERROR level."""
        self.logger.error(message)

    def critical(self, message):
        """Log at CRITICAL level."""
        self.logger.critical(message)

    def hr(self, title, level: Literal[0, 1, 2] = 0, write=True):
        """Print *title* as a banner (width 115, CJK characters counted as 2).

        level 0:
            +--------------------------+
            |        这是一个标题        |
            +--------------------------+
        level 1: ======= 这是一个标题 =======
        level 2: ------- 这是一个标题 -------
        """
        try:
            separator_length = 115
            title_lines = title.split('\n')
            separator = '+' + '-' * separator_length + '+'
            title_length = self._custom_len(title)
            half_separator_left = (separator_length - title_length) // 2
            half_separator_right = separator_length - title_length - half_separator_left
            if level == 0:
                # Box each line individually so multi-line titles stay centered.
                formatted_title_lines = []
                for line in title_lines:
                    line_length = self._custom_len(line)
                    pad_left = (separator_length - line_length) // 2
                    pad_right = separator_length - line_length - pad_left
                    formatted_title_lines.append('|' + ' ' * pad_left + line + ' ' * pad_right + '|')
                formatted_title = f"{separator}\n" + "\n".join(formatted_title_lines) + f"\n{separator}"
            elif level == 1:
                formatted_title = '=' * half_separator_left + ' ' + title + ' ' + '=' * half_separator_right
            elif level == 2:
                formatted_title = '-' * half_separator_left + ' ' + title + ' ' + '-' * half_separator_right
            self._print_title(formatted_title, write)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Banner formatting failures are still ignored.
            pass

    def _custom_len(self, text):
        """Display width of *text*: East-Asian wide/full-width chars count as 2."""
        return sum(2 if unicodedata.east_asian_width(c) in 'WF' else 1 for c in text)

    def _print_title(self, title, write):
        """Emit the banner through logger_title, or plain print when write=False."""
        if write:
            self.logger_title.info(title)
        else:
            print(title)
The provided code snippet includes necessary dependencies for implementing the `check_temp_dir_and_run` function. Write a Python function `def check_temp_dir_and_run()` to solve the following problem:
检查临时目录并运行更新程序。
Here is the function:
def check_temp_dir_and_run():
    """Relaunch the frozen updater from ./temp so the original exe can be
    overwritten, then run the update flow.

    Exits with code 1 when not running as a PyInstaller-frozen exe.
    """
    if not getattr(sys, 'frozen', False):
        print("更新程序只支持打包成exe后运行")
        sys.exit(1)
    temp_path = os.path.abspath("./temp")
    os.makedirs(temp_path, exist_ok=True)
    file_path = sys.argv[0]
    file_name = os.path.basename(file_path)
    destination_path = os.path.join(temp_path, file_name)
    if file_path != destination_path:
        # Not yet running from temp: copy ourselves there, start the copy
        # detached with the same arguments, and exit this instance.
        shutil.copy(file_path, destination_path)
        args = [destination_path] + sys.argv[1:]
        subprocess.Popen(args, creationflags=subprocess.DETACHED_PROCESS)
        sys.exit(0)
    # Optional explicit download URL as the single CLI argument.
    download_url = sys.argv[1] if len(sys.argv) == 2 else None
    logger = Logger()
    updater = Updater(logger, download_url)
    updater.run()
17,274 | from utils.command import subprocess_with_stdout
import subprocess
import sys
import os
def is_windows_terminal_available():
    """Report whether Windows Terminal (wt.exe) can be located on PATH."""
    lookup = subprocess_with_stdout(["where", "wt.exe"])
    return lookup is not None
def execute_command_in_new_environment(command, use_windows_terminal=False):
    """Launch the assistant (frozen exe, or `python main.py` when running from
    source) with *command* appended, in a new console — inside Windows
    Terminal when requested and available."""
    frozen = getattr(sys, 'frozen', False)
    executable_path = os.path.abspath("./March7th Assistant.exe") if frozen else sys.executable
    main_script = [] if frozen else ["main.py"]
    argv = [executable_path] + main_script + [command]
    if use_windows_terminal:
        try:
            # Prefer Windows Terminal; detach so we don't hold a console.
            subprocess.Popen(["wt"] + argv, creationflags=subprocess.DETACHED_PROCESS)
            return
        except:
            # wt launch failed for any reason: fall through to a plain console.
            pass
    subprocess.Popen(argv, creationflags=subprocess.CREATE_NEW_CONSOLE)
The provided code snippet includes necessary dependencies for implementing the `start_task` function. Write a Python function `def start_task(command)` to solve the following problem:
根据当前环境,启动任务。
Here is the function:
def start_task(command):
    """Launch *command* in a new environment, via Windows Terminal when it
    is available on this machine."""
    execute_command_in_new_environment(
        command,
        use_windows_terminal=is_windows_terminal_available(),
    )
17,275 | from tqdm import tqdm
import urllib.request
import subprocess
import os
def download_with_progress(download_url, save_path):
    """Download *download_url* to *save_path*.

    Uses the bundled aria2c (16 connections, resumable) when present,
    otherwise urllib with a tqdm progress bar.

    Raises:
        Exception: when aria2c exits with a non-zero status.
    """
    aria2_path = os.path.abspath("./assets/binary/aria2c.exe")
    if os.path.exists(aria2_path):
        command = [aria2_path, "--max-connection-per-server=16", f"--dir={os.path.dirname(save_path)}", f"--out={os.path.basename(save_path)}", f"{download_url}"]
        if os.path.exists(save_path):
            # A partial file exists: resume instead of restarting.
            command.insert(2, "--continue=true")
        process = subprocess.Popen(command)
        process.wait()
        if process.returncode != 0:
            # Fix: include the exit code instead of raising a bare Exception.
            raise Exception(f"aria2c exited with code {process.returncode}")
    else:
        # Ask for the size first so tqdm can show a proper total.
        # Fix: close the urlopen response (it was previously leaked).
        with urllib.request.urlopen(download_url) as response:
            file_size = int(response.info().get('Content-Length', -1))
        with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            def update_bar(block_count, block_size, total_size):
                # urlretrieve may report the real total late; keep tqdm in sync.
                if pbar.total != total_size:
                    pbar.total = total_size
                downloaded = block_count * block_size
                pbar.update(downloaded - pbar.n)
            urllib.request.urlretrieve(download_url, save_path, reporthook=update_bar)
17,276 | import sys
import os.path
import pkgutil
import shutil
import tempfile
import argparse
import importlib
from base64 import b85decode
def determine_pip_install_arguments():
def monkeypatch_for_cert(tmpdir):
def main():
def bootstrap(tmpdir):
    """Install the latest pip/setuptools using the pip copy bundled in this
    script (get-pip style bootstrap).

    NOTE(review): relies on monkeypatch_for_cert and
    determine_pip_install_arguments defined elsewhere in this file.
    """
    monkeypatch_for_cert(tmpdir)
    # Execute the included pip and use it to install the latest pip and
    # setuptools from PyPI
    from pip._internal.cli.main import main as pip_entry_point
    args = determine_pip_install_arguments()
    # Propagate pip's exit status as the process exit code.
    sys.exit(pip_entry_point(args))
17,277 | import sys
from enum import Enum
from PyQt5.QtCore import QLocale
from qfluentwidgets import (qconfig, QConfig, ConfigItem, OptionsConfigItem, BoolValidator,
OptionsValidator, RangeConfigItem, RangeValidator,
FolderListValidator, EnumSerializer, FolderValidator, ConfigSerializer, __version__)
def isWin11():
    """Return True when running on Windows 11 (i.e. build 22000 or later)."""
    if sys.platform != 'win32':
        return False
    return sys.getwindowsversion().build >= 22000
17,278 | from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import QThread, pyqtSignal
from ..card.messagebox_custom import MessageBoxAnnouncement
from module.config import cfg
from io import BytesIO
from enum import Enum
import requests
import qrcode
def download_image(image_url):
    """Fetch *image_url* over HTTP and return the payload as a QImage.

    Raises Exception on any non-200 response.
    """
    response = requests.get(image_url)
    if response.status_code != 200:
        raise Exception("Failed to download image.")
    return QImage.fromData(response.content)
17,279 | from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import QThread, pyqtSignal
from ..card.messagebox_custom import MessageBoxAnnouncement
from module.config import cfg
from io import BytesIO
from enum import Enum
import requests
import qrcode
def generate_qr_code(url):
    """Render *url* as a QR code and return it as a QImage (PNG-encoded)."""
    builder = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=7,
        border=4,
    )
    builder.add_data(url)
    builder.make(fit=True)
    image = builder.make_image(fill_color="black", back_color="white")
    buffer = BytesIO()
    image.save(buffer, format='PNG')
    return QImage.fromData(buffer.getvalue())
17,280 | from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import QThread, pyqtSignal
from ..card.messagebox_custom import MessageBoxAnnouncement
from module.config import cfg
from io import BytesIO
from enum import Enum
import requests
import qrcode
class AnnouncementStatus(Enum):
class AnnouncementThread(QThread):
def __init__(self):
def run(self):
class MessageBoxAnnouncement(MessageBoxImage):
def __init__(self, title: str, content: str, image: Optional[str | QPixmap], parent=None):
def checkAnnouncement(self):
    """Fetch the latest announcement on a worker thread and show it in a
    message box when retrieval succeeds."""
    def handle_announcement(status):
        # Runs on the GUI thread via the Qt signal connection.
        if status == AnnouncementStatus.SUCCESS:
            message_box = MessageBoxAnnouncement(
                self.announcement_thread.title,
                self.announcement_thread.content,
                self.announcement_thread.image,
                self.window()
            )
            message_box.exec()
    # Keep the thread referenced on self so it is not garbage-collected
    # while running.
    self.announcement_thread = AnnouncementThread()
    self.announcement_thread.announcementSignal.connect(handle_announcement)
    self.announcement_thread.start()
17,281 | from PyQt5.QtCore import Qt, QThread, pyqtSignal
from qfluentwidgets import InfoBar, InfoBarPosition, StateToolTip
from urllib.parse import urlencode, urlparse, parse_qs
from win32api import CopyFile
from datetime import datetime
from pathlib import Path
from enum import Enum
import markdown
import requests
import tempfile
import random
import time
import glob
import json
import re
import os
class WarpStatus(Enum):
class WarpThread(QThread):
def __init__(self, parent):
def run(self):
def warpExport(self):
    """Fetch warp (gacha) records on a background thread, showing a state
    tooltip and disabling the update button until the thread reports back."""
    self.stateTooltip = StateToolTip("抽卡记录", "正在获取跃迁数据...", self.window())
    self.stateTooltip.closeButton.setVisible(False)
    self.stateTooltip.move(self.stateTooltip.getSuitablePos())
    self.stateTooltip.show()
    self.updateBtn.setEnabled(False)
    def handle_warp(status):
        # Signal handler (GUI thread): close the tooltip, re-enable the button.
        if status == WarpStatus.SUCCESS:
            self.stateTooltip.setContent("跃迁数据获取完成(^∀^●)")
            self.stateTooltip.setState(True)
            self.stateTooltip = None
            self.updateBtn.setEnabled(True)
        elif status == WarpStatus.FAILURE:
            # Failure message intentionally disabled; tooltip is just closed.
            # self.stateTooltip.setContent("跃迁数据获取失败(´▔∀▔`)")
            self.stateTooltip.setState(True)
            self.stateTooltip = None
            self.updateBtn.setEnabled(True)
    # Keep the thread referenced on self so it is not garbage-collected.
    self.warp_thread = WarpThread(self)
    self.warp_thread.warpSignal.connect(handle_warp)
    self.warp_thread.start()
17,282 | from PyQt5.QtCore import Qt
from qfluentwidgets import InfoBar, InfoBarPosition
from ..card.messagebox_custom import MessageBoxDisclaimer
from module.config import cfg
import markdown
import base64
import time
import sys
import os
class MessageBoxDisclaimer(MessageBoxHtml):
def __init__(self, title: str, content: str, parent=None):
cfg = Config(VERSION_PATH, EXAMPLE_PATH, CONFIG_PATH)
cfg.env = os.environ.copy()
cfg.env['PATH'] = os.path.dirname(cfg.python_exe_path) + ';' + cfg.env['PATH']
cfg.useragent = {"User-Agent": f"March7thAssistant/{cfg.version}"}
if cfg.auto_set_game_path_enable:
detect_game_path()
def disclaimer(self):
    """Show the first-run disclaimer dialog (strings base64-obfuscated on
    purpose — an anti-resale notice).

    The dialog must stay open for more than 10 seconds before the decline
    path is accepted; the "yes" button (and any exception) exits the app.
    Declining records acknowledgement via a marker file under %ProgramData%
    and enables auto-update.
    """
    html_style = """
    <style>
    a {
        color: #f18cb9;
        font-weight: bold;
    }
    </style>
    """
    # Base64-encoded Markdown body of the disclaimer.
    content = "LSDmraTnqIvluo/kuLrlhY3otLnlvIDmupDpobnnm67vvIzlpoLmnpzkvaDku5jkuobpkrHor7fnq4vliLvpgIDmrL7vvIHvvIHvvIEKCi0g5pys6aG555uu5bey57uP5Zug5YCS5Y2W6KGM5Li65Y+X5Yiw5Lil6YeN5aiB6IOB77yM6K+35biu5Yqp5oiR5Lus77yB77yB77yBCgotIOmXsumxvOWAkueLlzQwMDAr77yB5L2g5LuY57uZ5YCS54uX55qE5q+P5LiA5YiG6ZKx6YO95Lya6K6p5byA5rqQ6Ieq5Yqo5YyW5pu05Zu06Zq+77yM6K+36YCA5qy+5bm25Li+5oql5ZWG5a6277yB77yB77yBCgo8aDEgc3R5bGU9ImNvbG9yOiNmMThjYjkiPuazqOaEj++8mui9r+S7tuacquaOiOadg+S7u+S9leS6uuS7peS7u+S9leaWueW8j+i/m+ihjOWUruWNlu+8jDwvaDE+Cgo8aDEgc3R5bGU9ImNvbG9yOiNmMThjYjkiPuWmguWcqOmXsumxvOetieW3sui0reS5sO+8jOWPr+WHreatpOW8ueeql+aIquWbvuimgeaxgumAgOasvjwvaDE+CgrmnKzova/ku7blvIDmupDjgIHlhY3otLnvvIzku4XkvpvlrabkuaDkuqTmtYHkvb/nlKjjgILlvIDlj5HogIXlm6LpmJ/mi6XmnInmnKzpobnnm67nmoTmnIDnu4jop6Pph4rmnYPjgIIKCuS9v+eUqOacrOi9r+S7tuS6p+eUn+eahOaJgOaciemXrumimOS4juacrOmhueebruS4juW8gOWPkeiAheWboumYn+aXoOWFs+OAggoK6K+35rOo5oSP77yM5qC55o2uTWlIb1lv55qEIFvltKnlnY865pif56m56ZOB6YGT55qE5YWs5bmz5ri45oiP5a6j6KiAXShodHRwczovL3NyLm1paG95by5jb20vbmV3cy8xMTEyNDY/bmF2PW5ld3MmdHlwZT1ub3RpY2UpOgoKICAgICLkuKXnpoHkvb/nlKjlpJbmjILjgIHliqDpgJ/lmajjgIHohJrmnKzmiJblhbbku5bnoLTlnY/muLjmiI/lhazlubPmgKfnmoTnrKzkuInmlrnlt6XlhbfjgIIiCiAgICAi5LiA57uP5Y+R546w77yM57Gz5ZOI5ri477yI5LiL5Lqm56ew4oCc5oiR5Lus4oCd77yJ5bCG6KeG6L+d6KeE5Lil6YeN56iL5bqm5Y+K6L+d6KeE5qyh5pWw77yM6YeH5Y+W5omj6Zmk6L+d6KeE5pS255uK44CB5Ya757uT5ri45oiP6LSm5Y+344CB5rC45LmF5bCB56aB5ri45oiP6LSm5Y+3562J5o6q5pa944CCIg=="
    try:
        # Dialog title is base64("免责声明") = "Disclaimer".
        w = MessageBoxDisclaimer(base64.b64decode("5YWN6LSj5aOw5piO").decode("utf-8"), html_style + markdown.markdown(base64.b64decode(content).decode("utf-8")), self.window())
        while True:
            start_time = time.time()
            result = w.exec()
            if result:
                # "Yes" button pressed: refuse to run.
                sys.exit(0)
            time_used = time.time() - start_time
            if time_used > 10:
                # Dialog was open long enough: accept the decline.
                break
            else:
                # Closed too quickly: nag ("read it for a while longer")
                # and show the dialog again.
                InfoBar.error(
                    title=base64.b64decode("6ZiF6K+75pe26Ze05aSq55+t5LqG77yM5aSa5YGc55WZ5LiA5Lya5ZCnKO+8vuKIgO+8vuKXjyk=").decode("utf-8"),
                    content="",
                    orient=Qt.Horizontal,
                    isClosable=True,
                    position=InfoBarPosition.TOP,
                    duration=5000,
                    parent=self.window()
                )
        if not result:
            # Persist acknowledgement: enable "auto_update" and drop a
            # marker file at %ProgramData%/March7thAssistant/disclaimer.
            cfg.set_value(base64.b64decode("YXV0b191cGRhdGU=").decode("utf-8"), True)
            path = os.path.join(os.environ[base64.b64decode("UHJvZ3JhbURhdGE=").decode("utf-8")], base64.b64decode("TWFyY2g3dGhBc3Npc3RhbnQvZGlzY2xhaW1lcg==").decode("utf-8"))
            os.makedirs(os.path.dirname(path), exist_ok=True)
            open(path, 'a').close()
        else:
            sys.exit(0)
    except Exception:
        # Any failure in the disclaimer flow aborts the program.
        sys.exit(0)
17,283 | from PyQt5.QtCore import Qt, QThread, pyqtSignal
from qfluentwidgets import InfoBar, InfoBarPosition
from ..card.messagebox_custom import MessageBoxUpdate
from tasks.base.fastest_mirror import FastestMirror
from module.config import cfg
from packaging.version import parse
from enum import Enum
import subprocess
import markdown
import requests
import re
import os
class UpdateStatus(Enum):
    """Result of an update check, emitted through UpdateThread.updateSignal."""
    SUCCESS = 1           # already on the latest version (or no matching asset)
    UPDATE_AVAILABLE = 2  # a newer release with a suitable asset exists
    FAILURE = 0           # the check itself failed (network/API error)
class UpdateThread(QThread):
    """Background thread that checks GitHub releases for a newer version."""
    updateSignal = pyqtSignal(UpdateStatus)

    def __init__(self, timeout, flag):
        super().__init__()
        self.timeout = timeout  # request timeout (seconds); NOTE(review): unused in run()
        self.flag = flag  # when True, only check if cfg.check_update is enabled

    def remove_images_from_markdown(self, markdown_content):
        """Strip Markdown image tags from the release notes."""
        img_pattern = re.compile(r'!\[.*?\]\(.*?\)')
        return img_pattern.sub('', markdown_content)

    def fetch_latest_release_info(self):
        """Fetch release metadata from the mirrored GitHub API.

        Returns the newest entry (first list item when pre-releases are
        included, otherwise the /latest payload). Raises on HTTP errors.
        """
        response = requests.get(
            FastestMirror.get_github_api_mirror("moesnow", "March7thAssistant", not cfg.update_prerelease_enable),
            timeout=10,
            headers=cfg.useragent
        )
        response.raise_for_status()
        return response.json()[0] if cfg.update_prerelease_enable else response.json()

    def get_download_url_from_assets(self, assets):
        """Return the asset URL matching the full/non-full preference, else None."""
        for asset in assets:
            if (cfg.update_full_enable and "full" in asset["browser_download_url"]) or \
               (not cfg.update_full_enable and "full" not in asset["browser_download_url"]):
                return asset["browser_download_url"]
        return None

    def run(self):
        """Compare the remote tag with cfg.version and emit the result."""
        try:
            if self.flag and not cfg.check_update:
                return
            data = self.fetch_latest_release_info()
            version = data["tag_name"]
            content = self.remove_images_from_markdown(data["body"])
            assert_url = self.get_download_url_from_assets(data["assets"])
            if assert_url is None:
                # No suitable asset: report "up to date" rather than failing.
                self.updateSignal.emit(UpdateStatus.SUCCESS)
                return
            if parse(version.lstrip('v')) > parse(cfg.version.lstrip('v')):
                # Stash dialog content on the thread for the GUI handler.
                self.title = f"发现新版本:{cfg.version} ——> {version}\n更新日志 |・ω・)"
                self.content = "<style>a {color: #f18cb9; font-weight: bold;}</style>" + markdown.markdown(content)
                self.assert_url = assert_url
                self.updateSignal.emit(UpdateStatus.UPDATE_AVAILABLE)
            else:
                self.updateSignal.emit(UpdateStatus.SUCCESS)
        except Exception as e:
            print(e)
            self.updateSignal.emit(UpdateStatus.FAILURE)
class MessageBoxUpdate(MessageBoxHtml):
    """Release-notes dialog with "下载" (download) / "好的" (OK) buttons."""
    def __init__(self, title: str, content: str, parent=None):
        super().__init__(title, content, parent)
        self.yesButton.setText('下载')
        self.cancelButton.setText('好的')
class FastestMirror:
    """Static helpers that wrap GitHub/PyPI URLs in mirrors and pick the
    fastest responding mirror.

    Methods were plain functions in the class body; they are now marked
    @staticmethod (call sites `FastestMirror.xxx(...)` are unchanged).
    NOTE(review): find_fastest_mirror needs `time`, `requests` and
    `concurrent.futures` at module level — confirm against the file's imports.
    """

    @staticmethod
    def get_github_mirror(download_url):
        """Return the mirrored form of a GitHub download URL.

        Mirror speed testing is intentionally disabled; the mirror is forced.
        """
        # mirror_urls = [download_url, f"https://github.kotori.top/{download_url}"]
        # return FastestMirror.find_fastest_mirror(mirror_urls, 5)
        return f"https://github.kotori.top/{download_url}"

    @staticmethod
    def get_github_api_mirror(user, repo, latest=True):
        """Return the mirrored GitHub releases API URL (latest or full list)."""
        if latest:
            return f"https://github.kotori.top/https://api.github.com/repos/{user}/{repo}/releases/latest"
        else:
            return f"https://github.kotori.top/https://api.github.com/repos/{user}/{repo}/releases"

    @staticmethod
    def get_pypi_mirror(timeout=5):
        """Pick the fastest of the configured PyPI mirrors."""
        return FastestMirror.find_fastest_mirror(cfg.pypi_mirror_urls, timeout)

    @staticmethod
    def find_fastest_mirror(mirror_urls, timeout=5):
        """Probe mirrors concurrently; return the fastest, else the first.

        Bug fix: the old min(...) key (x[1] is not None, x[1]) ordered failed
        probes *before* successful ones and raised TypeError comparing
        None < None when two probes failed. Failures are filtered out first.
        """
        def probe(url):
            # (url, elapsed) on HTTP 200; None on any failure.
            try:
                start = time.time()
                response = requests.head(url, timeout=timeout, allow_redirects=True)
                if response.status_code == 200:
                    return url, time.time() - start
            except Exception:
                pass
            return None

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [executor.submit(probe, url) for url in mirror_urls]
            results = [future.result() for future in concurrent.futures.as_completed(futures)]
        timed = [item for item in results if item is not None]
        if not timed:
            return mirror_urls[0]
        return min(timed, key=lambda pair: pair[1])[0]
The provided code snippet includes necessary dependencies for implementing the `checkUpdate` function. Write a Python function `def checkUpdate(self, timeout=5, flag=False)` to solve the following problem:
检查更新,并根据更新状态显示不同的信息或执行更新操作。
Here is the function:
def checkUpdate(self, timeout=5, flag=False):
    """Check for updates on a background thread; show the release dialog
    when a newer version exists, otherwise an info bar with the result."""
    def handle_update(status):
        # Runs on the GUI thread via the Qt signal connection.
        if status == UpdateStatus.UPDATE_AVAILABLE:
            # Show the release-notes dialog.
            message_box = MessageBoxUpdate(
                self.update_thread.title,
                self.update_thread.content,
                self.window()
            )
            if message_box.exec():
                # Hand the asset URL to the external updater and detach.
                source_file = os.path.abspath("./Update.exe")
                assert_url = FastestMirror.get_github_mirror(self.update_thread.assert_url)
                subprocess.Popen([source_file, assert_url], creationflags=subprocess.DETACHED_PROCESS)
        elif status == UpdateStatus.SUCCESS:
            # Already on the latest version.
            InfoBar.success(
                title=self.tr('当前是最新版本(^∀^●)'),
                content="",
                orient=Qt.Horizontal,
                isClosable=True,
                position=InfoBarPosition.TOP,
                duration=1000,
                parent=self
            )
        else:
            # Update check failed (network/API error).
            InfoBar.warning(
                title=self.tr('检测更新失败(╥╯﹏╰╥)'),
                content="",
                orient=Qt.Horizontal,
                isClosable=True,
                position=InfoBarPosition.TOP,
                duration=1000,
                parent=self
            )
    # Keep a reference so the QThread is not garbage-collected while running.
    self.update_thread = UpdateThread(timeout, flag)
    self.update_thread.updateSignal.connect(handle_update)
    self.update_thread.start()
17,284 | import subprocess
def subprocess_with_timeout(command, timeout, working_directory=None, env=None):
    """Run *command*, waiting at most *timeout* seconds.

    Returns True only when the process finishes in time with exit code 0.
    A timed-out process is terminated (and reaped) and False is returned.
    """
    process = subprocess.Popen(command, cwd=working_directory, env=env)
    try:
        process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.terminate()
        process.wait()
        return False
    return process.returncode == 0
17,285 | from typing import Literal
import winreg
import os
The provided code snippet includes necessary dependencies for implementing the `get_game_auto_hdr` function. Write a Python function `def get_game_auto_hdr(game_path: str) -> Literal["enable", "disable", "unset"]` to solve the following problem:
Get the Auto HDR setting for a specific game via Windows Registry. Parameters: - game_path: The file path to the game executable, ensuring Windows path conventions. Returns: - A Literal indicating the status of Auto HDR for the game: "enable", "disable", or "unset".
Here is the function:
def get_game_auto_hdr(game_path: str) -> Literal["enable", "disable", "unset"]:
    """Read the per-game Auto HDR preference from the Windows registry.

    *game_path* must be an absolute path; it is normalized and used as the
    value name under HKCU\\...\\DirectX\\UserGpuPreferences.

    Returns "enable", "disable", or "unset" (value missing / not configured).
    Raises ValueError for a relative path, Exception for other registry errors.
    """
    if not os.path.isabs(game_path):
        raise ValueError(f"'{game_path}' is not an absolute path.")
    game_path = os.path.normpath(game_path)
    key_path = r"Software\Microsoft\DirectX\UserGpuPreferences"
    try:
        with winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_path) as key:
            raw_value, _ = winreg.QueryValueEx(key, game_path)
            # Value format is "Name=Value;Name=Value;...".
            settings = dict(entry.split("=") for entry in raw_value.split(";") if entry)
            status_map = {"2097": "enable", "2096": "disable"}
            return status_map.get(settings.get("AutoHDREnable"), "unset")
    except FileNotFoundError:
        # Key or value absent: the game has no Auto HDR preference.
        return "unset"
    except Exception as e:
        raise Exception(f"Error getting Auto HDR status for '{game_path}': {e}")
17,286 | from typing import Literal
import winreg
import os
The provided code snippet includes necessary dependencies for implementing the `set_game_auto_hdr` function. Write a Python function `def set_game_auto_hdr(game_path: str, status: Literal["enable", "disable", "unset"] = "unset")` to solve the following problem:
Set, update, or unset the Auto HDR setting for a specific game via Windows Registry, without affecting other settings. Ensures the game path is an absolute path and raises exceptions on errors instead of printing. Parameters: - game_path: The file path to the game executable, ensuring Windows path conventions. - status: Literal indicating the desired status for Auto HDR. One of "enable", "disable", or "unset".
Here is the function:
def set_game_auto_hdr(game_path: str, status: Literal["enable", "disable", "unset"] = "unset"):
    """
    Set, update, or unset the Auto HDR setting for a specific game via Windows Registry,
    without affecting other settings. Ensures the game path is an absolute path
    and raises exceptions on errors instead of printing.
    Parameters:
    - game_path: The file path to the game executable, ensuring Windows path conventions.
    - status: Literal indicating the desired status for Auto HDR. One of "enable", "disable", or "unset".
    """
    if not os.path.isabs(game_path):
        raise ValueError(f"'{game_path}' is not an absolute path.")
    game_path = os.path.normpath(game_path)
    reg_path = r"Software\Microsoft\DirectX\UserGpuPreferences"
    # The normalized exe path is the registry value name.
    reg_key = game_path
    # Registry magic values: 2097 -> enabled, 2096 -> disabled (see
    # get_game_auto_hdr); "unset" maps to None.
    hdr_value = {"enable": "2097", "disable": "2096"}.get(status, None)
    try:
        with winreg.CreateKey(winreg.HKEY_CURRENT_USER, reg_path) as key:
            if status == "unset":
                try:
                    existing_value, _ = winreg.QueryValueEx(key, reg_key)
                    # Parse "Name=Value;..." and drop only the AutoHDR entry.
                    settings = dict(item.split("=") for item in existing_value.split(";") if item)
                    if "AutoHDREnable" in settings:
                        del settings["AutoHDREnable"]
                        updated_value = ";".join([f"{k}={v}" for k, v in settings.items()]) + ";"
                        if settings:
                            winreg.SetValueEx(key, reg_key, 0, winreg.REG_SZ, updated_value)
                        else:
                            # Nothing left for this game: remove the value entirely.
                            winreg.DeleteValue(key, reg_key)
                except FileNotFoundError:
                    # No existing value: nothing to unset.
                    pass
            else:
                try:
                    existing_value, _ = winreg.QueryValueEx(key, reg_key)
                except FileNotFoundError:
                    # No prior preferences for this game: start from empty.
                    existing_value = ""
                settings = dict(item.split("=") for item in existing_value.split(";") if item)
                if hdr_value is not None:
                    settings["AutoHDREnable"] = hdr_value
                updated_value = ";".join([f"{k}={v}" for k, v in settings.items()]) + ";"
                winreg.SetValueEx(key, reg_key, 0, winreg.REG_SZ, updated_value)
    except Exception as e:
        raise Exception(f"Error setting Auto HDR for '{game_path}' with status '{status}': {e}")
17,287 | from typing import Tuple, Optional
import winreg
import json
registry_key_path = r"SOFTWARE\miHoYo\崩坏:星穹铁道"
resolution_value_name = "GraphicsSettings_PCResolution_h431323223"
def read_registry_value(key, sub_key, value_name):
    """
    Read the content of the specified registry value.
    Parameters:
    - key: The handle of an open registry key.
    - sub_key: The name of the key, relative to key, to open.
    - value_name: The name of the value to query.
    Returns:
    The content of the specified value in the registry.
    Raises FileNotFoundError when the key/value is missing, Exception otherwise.
    """
    try:
        handle = winreg.OpenKey(key, sub_key)
        data, _ = winreg.QueryValueEx(handle, value_name)
        winreg.CloseKey(handle)
        return data
    except FileNotFoundError:
        raise FileNotFoundError(f"Specified registry key or value not found: {sub_key}\\{value_name}")
    except Exception as e:
        raise Exception(f"Error reading registry value: {e}")
The provided code snippet includes necessary dependencies for implementing the `get_game_resolution` function. Write a Python function `def get_game_resolution() -> Optional[Tuple[int, int, bool]]` to solve the following problem:
Return the game resolution from the registry value. This function does not take any parameters. Returns: - If the registry value exists and data is valid, it returns a tuple (width, height, isFullScreen) representing the game resolution. - If the registry value does not exist or data is invalid, it returns None or raises ValueError.
Here is the function:
def get_game_resolution() -> Optional[Tuple[int, int, bool]]:
    """
    Return the game resolution from the registry value.
    Returns:
    - (width, height, isFullScreen) when the registry value exists and is valid.
    - None when the registry value is empty; raises ValueError on invalid data.
    """
    value = read_registry_value(winreg.HKEY_CURRENT_USER, registry_key_path, resolution_value_name)
    if value:
        # REG_BINARY payload: UTF-8 JSON with trailing NUL padding.
        data_dict = json.loads(value.decode('utf-8').strip('\x00'))
        # Convert keys to lower case to ensure case-insensitivity
        data_dict = {k.lower(): v for k, v in data_dict.items()}
        # Validate data format with case-insensitive keys
        if 'width' in data_dict and 'height' in data_dict and 'isfullscreen' in data_dict:
            if isinstance(data_dict['width'], int) and isinstance(data_dict['height'], int) and isinstance(data_dict['isfullscreen'], bool):
                return data_dict['width'], data_dict['height'], data_dict['isfullscreen']
            else:
                raise ValueError("Registry data is invalid: width, height, and isFullScreen must be of type int, int, and bool respectively.")
        else:
            raise ValueError("Registry data is missing required fields: width, height, or isFullScreen.")
    return None
17,288 | from typing import Tuple, Optional
import winreg
import json
registry_key_path = r"SOFTWARE\miHoYo\崩坏:星穹铁道"
resolution_value_name = "GraphicsSettings_PCResolution_h431323223"
def write_registry_value(key, sub_key, value_name, data, mode) -> None:
"""
Write a registry value to the specified registry key.
Parameters:
- key: The registry key.
- sub_key: The subkey under the specified registry key.
- value_name: The name of the registry value.
- data: The data to be written to the registry.
- mode: The type of data.
"""
try:
# Open or create the specified registry key
registry_key = winreg.CreateKey(key, sub_key)
# Write data to the registry
winreg.SetValueEx(registry_key, value_name, 0, mode, data)
# Close the registry key
winreg.CloseKey(registry_key)
except Exception as e:
raise Exception(f"Error writing registry value: {e}")
The provided code snippet includes necessary dependencies for implementing the `set_game_resolution` function. Write a Python function `def set_game_resolution(width: int, height: int, is_fullscreen: bool) -> None` to solve the following problem:
Set the resolution of the game and whether it should run in fullscreen mode. Parameters: - width: The width of the game window. - height: The height of the game window. - is_fullscreen: Whether the game should run in fullscreen mode.
Here is the function:
def set_game_resolution(width: int, height: int, is_fullscreen: bool) -> None:
"""
Set the resolution of the game and whether it should run in fullscreen mode.
Parameters:
- width: The width of the game window.
- height: The height of the game window.
- is_fullscreen: Whether the game should run in fullscreen mode.
"""
data_dict = {
'width': width,
'height': height,
'isFullScreen': is_fullscreen
}
data = (json.dumps(data_dict) + '\x00').encode('utf-8')
write_registry_value(winreg.HKEY_CURRENT_USER, registry_key_path, resolution_value_name, data, winreg.REG_BINARY) | Set the resolution of the game and whether it should run in fullscreen mode. Parameters: - width: The width of the game window. - height: The height of the game window. - is_fullscreen: Whether the game should run in fullscreen mode. |
17,289 | from typing import Tuple, Optional
import winreg
import json
registry_key_path = r"SOFTWARE\miHoYo\崩坏:星穹铁道"
graphics_value_name = "GraphicsSettings_Model_h2986158309"
def read_registry_value(key, sub_key, value_name):
"""
Read the content of the specified registry value.
Parameters:
- key: The handle of an open registry key.
- sub_key: The name of the key, relative to key, to open.
- value_name: The name of the value to query.
Returns:
The content of the specified value in the registry.
"""
try:
# Open the specified registry key
registry_key = winreg.OpenKey(key, sub_key)
# Read the content of the specified value in the registry
value, _ = winreg.QueryValueEx(registry_key, value_name)
# Close the registry key
winreg.CloseKey(registry_key)
return value
except FileNotFoundError:
raise FileNotFoundError(f"Specified registry key or value not found: {sub_key}\\{value_name}")
except Exception as e:
raise Exception(f"Error reading registry value: {e}")
The provided code snippet includes necessary dependencies for implementing the `get_game_fps` function. Write a Python function `def get_game_fps() -> Optional[int]` to solve the following problem:
Return the game FPS settings from the registry value. This function does not take any parameters.
Here is the function:
def get_game_fps() -> Optional[int]:
"""
Return the game FPS settings from the registry value.
This function does not take any parameters.
"""
value = read_registry_value(winreg.HKEY_CURRENT_USER, registry_key_path, graphics_value_name)
if value:
data_dict = json.loads(value.decode('utf-8').strip('\x00'))
# Validate data format
if 'FPS' in data_dict:
if isinstance(data_dict['FPS'], int):
return data_dict['FPS']
else:
raise ValueError("Registry data is invalid: FPS must be of type int.")
else:
return 60
return None | Return the game FPS settings from the registry value. This function does not take any parameters. |
17,290 | from typing import Tuple, Optional
import winreg
import json
registry_key_path = r"SOFTWARE\miHoYo\崩坏:星穹铁道"
graphics_value_name = "GraphicsSettings_Model_h2986158309"
def read_registry_value(key, sub_key, value_name):
"""
Read the content of the specified registry value.
Parameters:
- key: The handle of an open registry key.
- sub_key: The name of the key, relative to key, to open.
- value_name: The name of the value to query.
Returns:
The content of the specified value in the registry.
"""
try:
# Open the specified registry key
registry_key = winreg.OpenKey(key, sub_key)
# Read the content of the specified value in the registry
value, _ = winreg.QueryValueEx(registry_key, value_name)
# Close the registry key
winreg.CloseKey(registry_key)
return value
except FileNotFoundError:
raise FileNotFoundError(f"Specified registry key or value not found: {sub_key}\\{value_name}")
except Exception as e:
raise Exception(f"Error reading registry value: {e}")
def write_registry_value(key, sub_key, value_name, data, mode) -> None:
"""
Write a registry value to the specified registry key.
Parameters:
- key: The registry key.
- sub_key: The subkey under the specified registry key.
- value_name: The name of the registry value.
- data: The data to be written to the registry.
- mode: The type of data.
"""
try:
# Open or create the specified registry key
registry_key = winreg.CreateKey(key, sub_key)
# Write data to the registry
winreg.SetValueEx(registry_key, value_name, 0, mode, data)
# Close the registry key
winreg.CloseKey(registry_key)
except Exception as e:
raise Exception(f"Error writing registry value: {e}")
The provided code snippet includes necessary dependencies for implementing the `set_game_fps` function. Write a Python function `def set_game_fps(fps: int) -> None` to solve the following problem:
Set the FPS of the game. Parameters: - fps
Here is the function:
def set_game_fps(fps: int) -> None:
"""
Set the FPS of the game.
Parameters:
- fps
"""
value = read_registry_value(winreg.HKEY_CURRENT_USER, registry_key_path, graphics_value_name)
data_dict = json.loads(value.decode('utf-8').strip('\x00'))
data_dict['FPS'] = fps
data = (json.dumps(data_dict) + '\x00').encode('utf-8')
write_registry_value(winreg.HKEY_CURRENT_USER, registry_key_path, graphics_value_name, data, winreg.REG_BINARY) | Set the FPS of the game. Parameters: - fps |
17,291 |
The provided code snippet includes necessary dependencies for implementing the `black` function. Write a Python function `def black(text)` to solve the following problem:
将文本颜色设置为黑色
Here is the function:
def black(text):
"""将文本颜色设置为黑色"""
return f"\033[30m{text}\033[0m" | 将文本颜色设置为黑色 |
17,292 |
The provided code snippet includes necessary dependencies for implementing the `grey` function. Write a Python function `def grey(text)` to solve the following problem:
将文本颜色设置为灰色
Here is the function:
def grey(text):
"""将文本颜色设置为灰色"""
return f"\033[90m{text}\033[0m" | 将文本颜色设置为灰色 |
17,293 |
The provided code snippet includes necessary dependencies for implementing the `red` function. Write a Python function `def red(text)` to solve the following problem:
将文本颜色设置为红色
Here is the function:
def red(text):
"""将文本颜色设置为红色"""
return f"\033[91m{text}\033[0m" | 将文本颜色设置为红色 |
17,294 |
The provided code snippet includes necessary dependencies for implementing the `green` function. Write a Python function `def green(text)` to solve the following problem:
将文本颜色设置为绿色
Here is the function:
def green(text):
"""将文本颜色设置为绿色"""
return f"\033[92m{text}\033[0m" | 将文本颜色设置为绿色 |
17,295 |
The provided code snippet includes necessary dependencies for implementing the `yellow` function. Write a Python function `def yellow(text)` to solve the following problem:
将文本颜色设置为黄色
Here is the function:
def yellow(text):
"""将文本颜色设置为黄色"""
return f"\033[93m{text}\033[0m" | 将文本颜色设置为黄色 |
17,296 |
The provided code snippet includes necessary dependencies for implementing the `blue` function. Write a Python function `def blue(text)` to solve the following problem:
将文本颜色设置为蓝色
Here is the function:
def blue(text):
"""将文本颜色设置为蓝色"""
return f"\033[94m{text}\033[0m" | 将文本颜色设置为蓝色 |
17,297 |
The provided code snippet includes necessary dependencies for implementing the `purple` function. Write a Python function `def purple(text)` to solve the following problem:
将文本颜色设置为紫色
Here is the function:
def purple(text):
"""将文本颜色设置为紫色"""
return f"\033[95m{text}\033[0m" | 将文本颜色设置为紫色 |
17,298 |
The provided code snippet includes necessary dependencies for implementing the `cyan` function. Write a Python function `def cyan(text)` to solve the following problem:
将文本颜色设置为青色
Here is the function:
def cyan(text):
"""将文本颜色设置为青色"""
return f"\033[96m{text}\033[0m" | 将文本颜色设置为青色 |
17,299 |
The provided code snippet includes necessary dependencies for implementing the `white` function. Write a Python function `def white(text)` to solve the following problem:
将文本颜色设置为白色
Here is the function:
def white(text):
"""将文本颜色设置为白色"""
return f"\033[97m{text}\033[0m" | 将文本颜色设置为白色 |
17,300 |
The provided code snippet includes necessary dependencies for implementing the `default` function. Write a Python function `def default(text)` to solve the following problem:
将文本颜色设置回默认颜色
Here is the function:
def default(text):
"""将文本颜色设置回默认颜色"""
return f"\033[39m{text}\033[0m" | 将文本颜色设置回默认颜色 |
17,301 | from fastapi import UploadFile
from functools import partial
from hashlib import sha256
from uuid import UUID
import aiofiles
import json
import re
from config import (
logger
)
_snake_1 = partial(re.compile(r'(.)((?<![^A-Za-z])[A-Z][a-z]+)').sub, r'\1_\2')
_snake_2 = partial(re.compile(r'([a-z0-9])([A-Z])').sub, r'\1_\2')
def snake_case(string: str) -> str:
return _snake_2(_snake_1(string)).casefold() | null |
17,302 | import random
import openai
import json
from langchain.docstore.document import Document as LangChainDocument
from langchain.embeddings.openai import OpenAIEmbeddings
from fastapi import HTTPException
from uuid import UUID, uuid4
from langchain.text_splitter import (
CharacterTextSplitter,
MarkdownTextSplitter
)
from sqlmodel import (
Session,
text
)
from util import (
sanitize_input,
sanitize_output
)
from langchain import OpenAI
from typing import (
List,
Union,
Optional,
Dict,
Tuple,
Any
)
from helpers import (
get_user_by_uuid_or_identifier,
get_chat_session_by_uuid
)
from models import (
User,
Organization,
Project,
Node,
ChatSession,
ChatSessionResponse,
get_engine
)
from config import (
CHANNEL_TYPE,
DOCUMENT_TYPE,
LLM_MODELS,
LLM_DISTANCE_THRESHOLD,
LLM_DEFAULT_TEMPERATURE,
LLM_MAX_OUTPUT_TOKENS,
LLM_CHUNK_SIZE,
LLM_CHUNK_OVERLAP,
LLM_MIN_NODE_LIMIT,
LLM_DEFAULT_DISTANCE_STRATEGY,
VECTOR_EMBEDDINGS_COUNT,
DISTANCE_STRATEGY,
AGENT_NAMES,
logger
)
AGENT_NAMES = [
"Aisha",
"Lilly",
"Hanna",
"Julia",
"Emily",
"Sophia",
"Alex",
"Isabella",
]
def get_random_agent():
return random.choice(AGENT_NAMES) | null |
17,303 | from fastapi import (
FastAPI,
File,
Depends,
HTTPException,
UploadFile
)
from fastapi.openapi.utils import get_openapi
from fastapi.staticfiles import StaticFiles
from sqlmodel import Session, select
from typing import (
List,
Optional,
Union,
Any
)
from datetime import datetime
import requests
import aiohttp
import time
import json
import os
from llm import (
chat_query
)
from models import (
# ---------------
# Database Models
# ---------------
Organization,
OrganizationCreate,
OrganizationRead,
OrganizationUpdate,
User,
UserCreate,
UserRead,
UserReadList,
UserUpdate,
DocumentRead,
DocumentReadList,
ProjectCreate,
ProjectRead,
ProjectReadList,
ChatSessionResponse,
ChatSessionCreatePost,
WebhookCreate,
# ------------------
# Database functions
# ------------------
get_engine,
get_session
)
from helpers import (
# ----------------
# Helper functions
# ----------------
get_org_by_uuid_or_namespace,
get_project_by_uuid,
get_user_by_uuid_or_identifier,
get_users,
get_documents_by_project_and_org,
get_document_by_uuid,
create_org_by_org_or_uuid,
create_project_by_org
)
from util import (
save_file,
get_sha256,
is_uuid,
logger
)
from config import (
APP_NAME,
APP_VERSION,
APP_DESCRIPTION,
ENTITY_STATUS,
CHANNEL_TYPE,
LLM_MODELS,
LLM_DISTANCE_THRESHOLD,
LLM_DEFAULT_DISTANCE_STRATEGY,
LLM_MAX_OUTPUT_TOKENS,
LLM_MIN_NODE_LIMIT,
FILE_UPLOAD_PATH,
RASA_WEBHOOK_URL
)
def health_check():
return {'status': 'ok'} | null |
17,304 | from fastapi import (
FastAPI,
File,
Depends,
HTTPException,
UploadFile
)
from fastapi.openapi.utils import get_openapi
from fastapi.staticfiles import StaticFiles
from sqlmodel import Session, select
from typing import (
List,
Optional,
Union,
Any
)
from datetime import datetime
import requests
import aiohttp
import time
import json
import os
from llm import (
chat_query
)
from models import (
# ---------------
# Database Models
# ---------------
Organization,
OrganizationCreate,
OrganizationRead,
OrganizationUpdate,
User,
UserCreate,
UserRead,
UserReadList,
UserUpdate,
DocumentRead,
DocumentReadList,
ProjectCreate,
ProjectRead,
ProjectReadList,
ChatSessionResponse,
ChatSessionCreatePost,
WebhookCreate,
# ------------------
# Database functions
# ------------------
get_engine,
get_session
)
from helpers import (
# ----------------
# Helper functions
# ----------------
get_org_by_uuid_or_namespace,
get_project_by_uuid,
get_user_by_uuid_or_identifier,
get_users,
get_documents_by_project_and_org,
get_document_by_uuid,
create_org_by_org_or_uuid,
create_project_by_org
)
from util import (
save_file,
get_sha256,
is_uuid,
logger
)
from config import (
APP_NAME,
APP_VERSION,
APP_DESCRIPTION,
ENTITY_STATUS,
CHANNEL_TYPE,
LLM_MODELS,
LLM_DISTANCE_THRESHOLD,
LLM_DEFAULT_DISTANCE_STRATEGY,
LLM_MAX_OUTPUT_TOKENS,
LLM_MIN_NODE_LIMIT,
FILE_UPLOAD_PATH,
RASA_WEBHOOK_URL
)
ENTITY_STATUS = IntEnum(
"ENTITY_STATUS",
["UNVERIFIED", "ACTIVE", "INACTIVE", "DELETED", "BANNED" "DEPRECATED"],
)
The provided code snippet includes necessary dependencies for implementing the `read_organizations` function. Write a Python function `def read_organizations()` to solve the following problem:
## Get all active organizations Returns: List[OrganizationRead]: List of organizations
Here is the function:
def read_organizations():
'''
## Get all active organizations
Returns:
List[OrganizationRead]: List of organizations
'''
with Session(get_engine()) as session:
orgs = session.exec(select(Organization).where(Organization.status == ENTITY_STATUS.ACTIVE.value)).all()
return orgs | ## Get all active organizations Returns: List[OrganizationRead]: List of organizations |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.