repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9
values |
|---|---|---|---|---|---|---|---|---|---|---|
CPES-Power-and-Energy-Systems/interoperable-recommender-tso | energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py | [
{
"identifier": "GaussianProcess",
"path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py",
"snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n ... | import numpy as np
from .helpers import GaussianProcess
from scipy.optimize import minimize
from .helpers import UtilityFunction, unique_rows, PrintLog | 10,671 | """
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],
size=(100, bounds.shape[0]))
for x_try in x_tries:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun >= max_acq:
x_max = res.x
max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
def matern52(theta, d):
"""
Matern 5/2 correlation model.::
theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n
where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation modle.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0]
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1))
return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)
class BayesianOptimization(object):
def __init__(self, f, pbounds, verbose=1):
"""
:param f:
Function to be maximized.
:param pbounds:
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
:param verbose:
Whether or not to print progress.
"""
# Store the original dictionary
self.pbounds = pbounds
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Find number of parameters
self.dim = len(pbounds)
# Create an array with parameters bounds
self.bounds = []
for key in self.pbounds.keys():
self.bounds.append(self.pbounds[key])
self.bounds = np.asarray(self.bounds)
# Some function to be optimized
self.f = f
# Initialization flag
self.initialized = False
# Initialization lists --- stores starting points before process begins
self.init_points = []
self.x_init = []
self.y_init = []
# Numpy array place holders
self.X = None
self.Y = None
# Counter of iterations
self.i = 0
# Since scipy 0.16 passing lower and upper bound to theta seems to be
# broken. However, there is a lot of development going on around GP
# is scikit-learn. So I'll pick the easy route here and simple specify
# only theta0.
| """
BAYESIAN OPTIMIZATION MODULE - Version 0.1.0
Created by Fernando Nogueira (fmfn). Available in
- https://github.com/fmfn/BayesianOptimization
"""
__author__ = 'fmfn'
def acq_max(ac, gp, y_max, bounds):
"""
A function to find the maximum of the acquisition function using
the 'L-BFGS-B' method.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],
size=(100, bounds.shape[0]))
for x_try in x_tries:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun >= max_acq:
x_max = res.x
max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
def matern52(theta, d):
"""
Matern 5/2 correlation model.::
theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n
where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation modle.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0]
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1))
return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)
class BayesianOptimization(object):
def __init__(self, f, pbounds, verbose=1):
"""
:param f:
Function to be maximized.
:param pbounds:
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
:param verbose:
Whether or not to print progress.
"""
# Store the original dictionary
self.pbounds = pbounds
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Find number of parameters
self.dim = len(pbounds)
# Create an array with parameters bounds
self.bounds = []
for key in self.pbounds.keys():
self.bounds.append(self.pbounds[key])
self.bounds = np.asarray(self.bounds)
# Some function to be optimized
self.f = f
# Initialization flag
self.initialized = False
# Initialization lists --- stores starting points before process begins
self.init_points = []
self.x_init = []
self.y_init = []
# Numpy array place holders
self.X = None
self.Y = None
# Counter of iterations
self.i = 0
# Since scipy 0.16 passing lower and upper bound to theta seems to be
# broken. However, there is a lot of development going on around GP
# is scikit-learn. So I'll pick the easy route here and simple specify
# only theta0. | self.gp = GaussianProcess(corr=matern52, | 0 | 2023-11-17 09:23:38+00:00 | 12k |
PlaxtonFlarion/NexaFlow | frameflow/framix.py | [
{
"identifier": "Show",
"path": "frameflow/show.py",
"snippet": "class Show(object):\n\n console = Console()\n\n @staticmethod\n def retry_fail_logo():\n logo = \"\"\"[bold]\n ╔════════════════════════════════╗\n ║ Retry Failed ║\n ╚════════════════... | import os
import re
import sys
import cv2
import time
import shutil
import random
import asyncio
import aiofiles
import tempfile
from loguru import logger
from rich.prompt import Prompt
from frameflow.show import Show
from frameflow.manage import Manage
from frameflow.parameters import Deploy, Option
from nexaflow import toolbox
from nexaflow.terminal import Terminal
from nexaflow.skills.report import Report
from nexaflow.video import VideoObject, VideoFrame
from nexaflow.cutter.cutter import VideoCutter
from nexaflow.hook import CropHook, OmitHook, FrameSaveHook, PaintCropHook, PaintOmitHook
from nexaflow.classifier.keras_classifier import KerasClassifier
from nexaflow.classifier.framix_classifier import FramixClassifier
from PIL import Image, ImageDraw, ImageFont
from multiprocessing import Pool, freeze_support
from argparse import ArgumentParser | 8,996 |
_tools_path = os.path.join(_job_path, "archivix", "tools")
_model_path = os.path.join(_job_path, "archivix", "molds", "model.h5")
_total_path = os.path.join(_job_path, "archivix", "pages")
_major_path = os.path.join(_job_path, "archivix", "pages")
_proto_path = os.path.join(_job_path, "archivix", "pages", "template_extra.html")
_initial_report = os.path.join(_universal, "framix.report")
_initial_deploy = os.path.join(_universal, "framix.source")
_initial_option = os.path.join(_universal, "framix.source")
if operation_system == "win32":
_adb = os.path.join(_tools_path, "win", "platform-tools", "adb.exe")
_ffmpeg = os.path.join(_tools_path, "win", "ffmpeg", "bin", "ffmpeg.exe")
_scrcpy = os.path.join(_tools_path, "win", "scrcpy", "scrcpy.exe")
elif operation_system == "darwin":
_adb = os.path.join(_tools_path, "mac", "platform-tools", "adb")
_ffmpeg = os.path.join(_tools_path, "mac", "ffmpeg", "bin", "ffmpeg")
_scrcpy = os.path.join(_tools_path, "mac", "scrcpy", "bin", "scrcpy")
else:
Show.console.print("[bold]Only compatible with [bold red]Windows[/bold red] and [bold red]macOS[/bold red] platforms ...[bold]")
time.sleep(5)
sys.exit(1)
os.environ["PATH"] = os.path.dirname(_adb) + os.path.pathsep + os.environ.get("PATH", "")
os.environ["PATH"] = os.path.dirname(_ffmpeg) + os.path.pathsep + os.environ.get("PATH", "")
os.environ["PATH"] = os.path.dirname(_scrcpy) + os.path.pathsep + os.environ.get("PATH", "")
try:
except (RuntimeError, ModuleNotFoundError) as err:
Show.console.print(f"[bold red]Error: {err}")
time.sleep(5)
sys.exit(1)
class Parser(object):
@staticmethod
def parse_cmd():
def parse_shape(dim_str):
if dim_str:
shape = [int(i) for i in re.split(r'[\s,;]+', dim_str)]
return tuple(shape) if len(shape) == 2 else (shape[0], shape[0])
return None
def parse_scale(dim_str):
try:
return int(dim_str)
except ValueError:
try:
return float(dim_str)
except ValueError:
return None
parser = ArgumentParser(description="Command Line Arguments Framix")
parser.add_argument('--flick', action='store_true', help='录制分析视频帧')
parser.add_argument('--alone', action='store_true', help='录制视频')
parser.add_argument('--paint', action='store_true', help='绘制分割线条')
parser.add_argument('--input', action='append', help='分析单个视频')
parser.add_argument('--whole', action='append', help='分析全部视频')
parser.add_argument('--merge', action='append', help='聚合报告')
parser.add_argument('--train', action='append', help='归类图片文件')
parser.add_argument('--build', action='append', help='训练模型文件')
parser.add_argument('--boost', action='store_true', help='快速模式')
parser.add_argument('--color', action='store_true', help='彩色模式')
parser.add_argument('--focus', action='store_true', help='转换视频')
parser.add_argument('--shape', nargs='?', const=None, type=parse_shape, help='图片尺寸')
parser.add_argument('--scale', nargs='?', const=None, type=parse_scale, help='缩放比例')
parser.add_argument('--crops', action='append', help='获取区域')
parser.add_argument('--omits', action='append', help='忽略区域')
parser.add_argument('--debug', action='store_true', help='调试模式')
return parser.parse_args()
class Missions(object):
def __init__(self, *args, **kwargs):
self.boost, self.color, self.focus, self.crops, self.omits, self.shape, self.scale = args
self.model_path = kwargs["model_path"]
self.total_path = kwargs["total_path"]
self.major_path = kwargs["major_path"]
self.proto_path = kwargs["proto_path"]
self.initial_report = kwargs["initial_report"]
self.initial_deploy = kwargs["initial_deploy"]
self.initial_option = kwargs["initial_option"]
self.adb = kwargs["adb"]
self.ffmpeg = kwargs["ffmpeg"]
self.scrcpy = kwargs["scrcpy"]
@staticmethod
def only_video(folder: str):
class Entry(object):
def __init__(self, title: str, place: str, sheet: list):
self.title = title
self.place = place
self.sheet = sheet
return [
Entry(
os.path.basename(root), root,
[os.path.join(root, f) for f in sorted(file) if "log" not in f]
)
for root, _, file in os.walk(folder) if file
]
def video_task(self, input_video):
reporter = Report(total_path=self.initial_report)
reporter.title = f"Framix_{time.strftime('%Y%m%d_%H%M%S')}_{os.getpid()}"
reporter.query = f"{random.randint(10, 99)}"
new_video_path = os.path.join(reporter.video_path, os.path.basename(input_video))
shutil.copy(input_video, new_video_path)
|
operation_system = sys.platform.strip().lower()
work_platform = os.path.basename(os.path.abspath(sys.argv[0])).lower()
exec_platform = ["framix.exe", "framix.bin", "framix", "framix.py"]
if work_platform == "framix.exe":
_job_path = os.path.dirname(os.path.abspath(sys.argv[0]))
_universal = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
elif work_platform == "framix.bin":
_job_path = os.path.dirname(sys.executable)
_universal = os.path.dirname(os.path.dirname(sys.executable))
elif work_platform == "framix":
_job_path = os.path.dirname(sys.executable)
_universal = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(sys.executable))))
elif work_platform == "framix.py":
_job_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
_universal = os.path.dirname(os.path.abspath(__file__))
else:
Show.console.print("[bold red]Only compatible with Windows and macOS platforms ...")
time.sleep(5)
sys.exit(1)
_tools_path = os.path.join(_job_path, "archivix", "tools")
_model_path = os.path.join(_job_path, "archivix", "molds", "model.h5")
_total_path = os.path.join(_job_path, "archivix", "pages")
_major_path = os.path.join(_job_path, "archivix", "pages")
_proto_path = os.path.join(_job_path, "archivix", "pages", "template_extra.html")
_initial_report = os.path.join(_universal, "framix.report")
_initial_deploy = os.path.join(_universal, "framix.source")
_initial_option = os.path.join(_universal, "framix.source")
if operation_system == "win32":
_adb = os.path.join(_tools_path, "win", "platform-tools", "adb.exe")
_ffmpeg = os.path.join(_tools_path, "win", "ffmpeg", "bin", "ffmpeg.exe")
_scrcpy = os.path.join(_tools_path, "win", "scrcpy", "scrcpy.exe")
elif operation_system == "darwin":
_adb = os.path.join(_tools_path, "mac", "platform-tools", "adb")
_ffmpeg = os.path.join(_tools_path, "mac", "ffmpeg", "bin", "ffmpeg")
_scrcpy = os.path.join(_tools_path, "mac", "scrcpy", "bin", "scrcpy")
else:
Show.console.print("[bold]Only compatible with [bold red]Windows[/bold red] and [bold red]macOS[/bold red] platforms ...[bold]")
time.sleep(5)
sys.exit(1)
os.environ["PATH"] = os.path.dirname(_adb) + os.path.pathsep + os.environ.get("PATH", "")
os.environ["PATH"] = os.path.dirname(_ffmpeg) + os.path.pathsep + os.environ.get("PATH", "")
os.environ["PATH"] = os.path.dirname(_scrcpy) + os.path.pathsep + os.environ.get("PATH", "")
try:
except (RuntimeError, ModuleNotFoundError) as err:
Show.console.print(f"[bold red]Error: {err}")
time.sleep(5)
sys.exit(1)
class Parser(object):
@staticmethod
def parse_cmd():
def parse_shape(dim_str):
if dim_str:
shape = [int(i) for i in re.split(r'[\s,;]+', dim_str)]
return tuple(shape) if len(shape) == 2 else (shape[0], shape[0])
return None
def parse_scale(dim_str):
try:
return int(dim_str)
except ValueError:
try:
return float(dim_str)
except ValueError:
return None
parser = ArgumentParser(description="Command Line Arguments Framix")
parser.add_argument('--flick', action='store_true', help='录制分析视频帧')
parser.add_argument('--alone', action='store_true', help='录制视频')
parser.add_argument('--paint', action='store_true', help='绘制分割线条')
parser.add_argument('--input', action='append', help='分析单个视频')
parser.add_argument('--whole', action='append', help='分析全部视频')
parser.add_argument('--merge', action='append', help='聚合报告')
parser.add_argument('--train', action='append', help='归类图片文件')
parser.add_argument('--build', action='append', help='训练模型文件')
parser.add_argument('--boost', action='store_true', help='快速模式')
parser.add_argument('--color', action='store_true', help='彩色模式')
parser.add_argument('--focus', action='store_true', help='转换视频')
parser.add_argument('--shape', nargs='?', const=None, type=parse_shape, help='图片尺寸')
parser.add_argument('--scale', nargs='?', const=None, type=parse_scale, help='缩放比例')
parser.add_argument('--crops', action='append', help='获取区域')
parser.add_argument('--omits', action='append', help='忽略区域')
parser.add_argument('--debug', action='store_true', help='调试模式')
return parser.parse_args()
class Missions(object):
def __init__(self, *args, **kwargs):
self.boost, self.color, self.focus, self.crops, self.omits, self.shape, self.scale = args
self.model_path = kwargs["model_path"]
self.total_path = kwargs["total_path"]
self.major_path = kwargs["major_path"]
self.proto_path = kwargs["proto_path"]
self.initial_report = kwargs["initial_report"]
self.initial_deploy = kwargs["initial_deploy"]
self.initial_option = kwargs["initial_option"]
self.adb = kwargs["adb"]
self.ffmpeg = kwargs["ffmpeg"]
self.scrcpy = kwargs["scrcpy"]
@staticmethod
def only_video(folder: str):
class Entry(object):
def __init__(self, title: str, place: str, sheet: list):
self.title = title
self.place = place
self.sheet = sheet
return [
Entry(
os.path.basename(root), root,
[os.path.join(root, f) for f in sorted(file) if "log" not in f]
)
for root, _, file in os.walk(folder) if file
]
def video_task(self, input_video):
reporter = Report(total_path=self.initial_report)
reporter.title = f"Framix_{time.strftime('%Y%m%d_%H%M%S')}_{os.getpid()}"
reporter.query = f"{random.randint(10, 99)}"
new_video_path = os.path.join(reporter.video_path, os.path.basename(input_video))
shutil.copy(input_video, new_video_path)
| deploy = Deploy( | 2 | 2023-11-13 05:27:34+00:00 | 12k |
OpenBMB/XAgent | XAgent/inner_loop_search_algorithms/ReACT.py | [
{
"identifier": "CONFIG",
"path": "XAgent/config.py",
"snippet": "CONFIG = XAgentConfig.get_default_config()"
},
{
"identifier": "BaseAgent",
"path": "XAgent/agent/base_agent.py",
"snippet": "class BaseAgent(metaclass=abc.ABCMeta):\n \"\"\"\n The BaseAgent class abstracts the essen... | import json
from colorama import Fore
from XAgent.config import CONFIG
from XAgent.agent.base_agent import BaseAgent
from XAgent.agent.summarize import summarize_action, summarize_plan, clip_text
from XAgent.core import XAgentCoreComponents
from XAgent.data_structure.node import ToolNode
from XAgent.data_structure.tree import TaskSearchTree
from XAgent.inner_loop_search_algorithms.base_search import BaseSearchMethod
from XAgent.message_history import Message
from XAgent.utils import SearchMethodStatusCode, ToolCallStatusCode | 8,552 | if "reasoning" in args.keys() and "reasoning" in assistant_thoughts.keys():
old["thoughts"]["properties"]["reasoning"] = args.get(
"reasoning", assistant_thoughts_reasoning)
if "plan" in args.keys() and "plan" in assistant_thoughts.keys():
old["thoughts"]["properties"]["plan"] = args.get(
"plan", assistant_thoughts_plan)
if "criticism" in args.keys() and "criticism" in assistant_thoughts.keys():
old["thoughts"]["properties"]["criticism"] = args.get(
"criticism", assistant_thoughts_criticism)
return old, True
def generate_chain(self, config, agent: BaseAgent, arguments, functions, task_id, now_dealing_task, plan_agent):
"""
Run the chain search task.
Args:
config: Configuration for the search.
agent: Base agent responsible for chain search.
arguments: Arguments for the current task to be handled.
functions: The available functions for use by agent.
task_id: ID of the current task.
Returns:
None.
Raises:
None.
"""
self.tree_list.append(TaskSearchTree())
now_attempt_tree = self.tree_list[-1]
now_node = now_attempt_tree.root
while now_node.get_depth() < config.max_subtask_chain_length:
self.xagent_core_components.logger.typewriter_log(
"-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
if now_node.father != None:
if self.xagent_core_components.interaction.interrupt:
can_modify = self.get_origin_data(now_node.data)
receive_data = self.xagent_core_components.interaction.receive(
can_modify)
data, rewrite_flag = self.rewrite_input_func(
now_node.data, receive_data)
now_node.data = data
if rewrite_flag:
self.xagent_core_components.logger.typewriter_log(
"-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
self.xagent_core_components.print_assistant_thoughts(now_node.data, False)
self.xagent_core_components.logger.typewriter_log(
"-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
message_sequence = make_message(now_node=now_node,
max_length=config.max_subtask_chain_length,
config=config,
now_dealing_task=now_dealing_task)
function_call = None
if now_node.get_depth() == config.max_subtask_chain_length - 1:
function_call = {"name": "subtask_submit"}
file_archi, _, = self.xagent_core_components.toolserver_interface.execute_command_client(
"FileSystemEnv_print_filesys_struture", {"return_root": True})
file_archi, length = clip_text(file_archi, 1000, clip_end=True)
human_prompt = ""
if config.enable_ask_human_for_help:
human_prompt = "- Use 'ask_human_for_help' when you need help, remember to be specific to your requirement to help user to understand your problem."
else:
human_prompt = "- Human is not available for help. You are not allowed to ask human for help in any form or channel. Solve the problem by yourself. If information is not enough, try your best to use default value."
all_plan = plan_agent.latest_plan.to_json()
if config.enable_summary:
all_plan = summarize_plan(all_plan)
else:
all_plan = json.dumps(all_plan, indent=2, ensure_ascii=False)
new_message, tokens = agent.parse(
placeholders={
"system": {
"all_plan": all_plan
},
"user": {
"workspace_files": file_archi,
"subtask_id": now_dealing_task.get_subtask_id(to_str=True),
"max_length": config.max_subtask_chain_length,
"step_num": str(now_node.get_depth()+1),
"human_help_prompt": human_prompt,
}
},
arguments=arguments,
functions=functions,
function_call=function_call,
additional_messages=message_sequence,
additional_insert_index=-1
)
new_tree_node = agent.message_to_tool_node(new_message)
print_data = self.xagent_core_components.print_assistant_thoughts(
new_tree_node.data, False
)
tool_output, tool_output_status_code, need_for_plan_refine, using_tools = self.xagent_core_components.function_handler.handle_tool_call(
new_tree_node)
self.need_for_plan_refine = need_for_plan_refine
now_attempt_tree.make_father_relation(now_node, new_tree_node)
self.xagent_core_components.interaction.insert_data(
data={**print_data, "using_tools": using_tools}, status="inner", current=task_id, is_include_pictures=self.is_include_pictures(using_tools))
now_node = new_tree_node
|
NOW_SUBTASK_PROMPT = '''
'''
def make_message(now_node: ToolNode, max_length, config, now_dealing_task):
"""
Function to generate messages for each node.
Args:
now_node: The current ToolNode instance.
task_handler: Handler of the tasks.
max_length: Maximum length of the subtask chain.
config: The configuration settings.
Returns:
The sequence of messages for the current node.
"""
if CONFIG.enable_summary:
terminal_task_info = summarize_plan(
now_dealing_task.to_json())
else:
terminal_task_info = json.dumps(
now_dealing_task.to_json(), indent=2, ensure_ascii=False)
message_sequence = []
now_subtask_prompt = f'''Now you will perform the following subtask:\n"""\n{terminal_task_info}\n"""\n'''
message_sequence.append(Message("user", now_subtask_prompt))
action_process = now_node.process
if config.enable_summary:
action_process = summarize_action(
action_process, terminal_task_info)
user_prompt = f"""The following steps have been performed (you have already done the following and the current file contents are shown below):\n
{action_process}
"""
message_sequence.append(Message("user", user_prompt))
return message_sequence
class ReACTChainSearch(BaseSearchMethod):
"""
Class for ReACT chain search. It performs chain based searches for tasks.
"""
def __init__(self, xagent_core_components: XAgentCoreComponents):
"""
xagent_core_components: XAgentCoreComponents object, used to initialize ReACTChainSearch object
Initializes ReACTChainSearch object. It maintains a list of trees to represent
the processed tasks.
"""
super().__init__()
self.tree_list = []
self.finish_node = None
self.xagent_core_components = xagent_core_components
def run(self,
config,
agent: BaseAgent,
arguments,
functions,
task_id,
now_dealing_task,
plan_agent,
max_try=1,
max_answer=1):
"""
Runs the chain search task.
Args:
config: Configuration for the search.
agent: Base agent responsible for chain search.
arguments: Arguments for the current task to be handled.
functions: The available functions for use by agent.
task_id: ID of the current task.
max_try: Maximum number of attempts.
max_answer: Maximum number of answers to be received
Returns:
None
Raises:
None
"""
for _attempt_id in range(max_try):
self.generate_chain(config, agent, arguments,
functions, task_id, now_dealing_task, plan_agent)
if self.status == SearchMethodStatusCode.HAVE_AT_LEAST_ONE_ANSWER:
self.status = SearchMethodStatusCode.SUCCESS
else:
self.status = SearchMethodStatusCode.FAIL
def get_finish_node(self):
"""
Function to retrieve the finished node in the task tree.
Returns:
The finished node.
"""
return self.finish_node
def get_origin_data(self, data):
"""
Retrieves the initially entered data.
Args:
data: The initially entered data list.
Returns:
The initially entered data as a dictionary.:
"""
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = data.get("thoughts", {})
assistant_thoughts = assistant_thoughts.get("properties", {})
assistant_thoughts_text = assistant_thoughts.get("thought")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
return {"args": {
"thoughts": assistant_thoughts_text,
"reasoning": assistant_thoughts_reasoning,
"plan": assistant_thoughts_plan,
"criticism": assistant_thoughts_criticism
}}
def rewrite_input_func(self, old, new):
"""
Checks whether the new inputs are valid and if so updates the old input
with the new one.
Args:
old: The old input entry.
new: The new input entry to replace the old one.
Returns:
The updated input list and the rewrite status.
"""
if not isinstance(new, dict):
pass
if new is None:
return old, False
else:
args = new.get("args", {})
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = old.get("thoughts", {})
assistant_thoughts = assistant_thoughts.get("properties", {})
assistant_thoughts_text = assistant_thoughts.get("thought")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get(
"reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get(
"criticism")
if "thoughts" in args.keys() and "thought" in assistant_thoughts.keys():
old["thoughts"]["properties"]["thought"] = args.get(
"thoughts", assistant_thoughts_text)
if "reasoning" in args.keys() and "reasoning" in assistant_thoughts.keys():
old["thoughts"]["properties"]["reasoning"] = args.get(
"reasoning", assistant_thoughts_reasoning)
if "plan" in args.keys() and "plan" in assistant_thoughts.keys():
old["thoughts"]["properties"]["plan"] = args.get(
"plan", assistant_thoughts_plan)
if "criticism" in args.keys() and "criticism" in assistant_thoughts.keys():
old["thoughts"]["properties"]["criticism"] = args.get(
"criticism", assistant_thoughts_criticism)
return old, True
def generate_chain(self, config, agent: BaseAgent, arguments, functions, task_id, now_dealing_task, plan_agent):
"""
Run the chain search task.
Args:
config: Configuration for the search.
agent: Base agent responsible for chain search.
arguments: Arguments for the current task to be handled.
functions: The available functions for use by agent.
task_id: ID of the current task.
Returns:
None.
Raises:
None.
"""
self.tree_list.append(TaskSearchTree())
now_attempt_tree = self.tree_list[-1]
now_node = now_attempt_tree.root
while now_node.get_depth() < config.max_subtask_chain_length:
self.xagent_core_components.logger.typewriter_log(
"-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
if now_node.father != None:
if self.xagent_core_components.interaction.interrupt:
can_modify = self.get_origin_data(now_node.data)
receive_data = self.xagent_core_components.interaction.receive(
can_modify)
data, rewrite_flag = self.rewrite_input_func(
now_node.data, receive_data)
now_node.data = data
if rewrite_flag:
self.xagent_core_components.logger.typewriter_log(
"-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
self.xagent_core_components.print_assistant_thoughts(now_node.data, False)
self.xagent_core_components.logger.typewriter_log(
"-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
message_sequence = make_message(now_node=now_node,
max_length=config.max_subtask_chain_length,
config=config,
now_dealing_task=now_dealing_task)
function_call = None
if now_node.get_depth() == config.max_subtask_chain_length - 1:
function_call = {"name": "subtask_submit"}
file_archi, _, = self.xagent_core_components.toolserver_interface.execute_command_client(
"FileSystemEnv_print_filesys_struture", {"return_root": True})
file_archi, length = clip_text(file_archi, 1000, clip_end=True)
human_prompt = ""
if config.enable_ask_human_for_help:
human_prompt = "- Use 'ask_human_for_help' when you need help, remember to be specific to your requirement to help user to understand your problem."
else:
human_prompt = "- Human is not available for help. You are not allowed to ask human for help in any form or channel. Solve the problem by yourself. If information is not enough, try your best to use default value."
all_plan = plan_agent.latest_plan.to_json()
if config.enable_summary:
all_plan = summarize_plan(all_plan)
else:
all_plan = json.dumps(all_plan, indent=2, ensure_ascii=False)
new_message, tokens = agent.parse(
placeholders={
"system": {
"all_plan": all_plan
},
"user": {
"workspace_files": file_archi,
"subtask_id": now_dealing_task.get_subtask_id(to_str=True),
"max_length": config.max_subtask_chain_length,
"step_num": str(now_node.get_depth()+1),
"human_help_prompt": human_prompt,
}
},
arguments=arguments,
functions=functions,
function_call=function_call,
additional_messages=message_sequence,
additional_insert_index=-1
)
new_tree_node = agent.message_to_tool_node(new_message)
print_data = self.xagent_core_components.print_assistant_thoughts(
new_tree_node.data, False
)
tool_output, tool_output_status_code, need_for_plan_refine, using_tools = self.xagent_core_components.function_handler.handle_tool_call(
new_tree_node)
self.need_for_plan_refine = need_for_plan_refine
now_attempt_tree.make_father_relation(now_node, new_tree_node)
self.xagent_core_components.interaction.insert_data(
data={**print_data, "using_tools": using_tools}, status="inner", current=task_id, is_include_pictures=self.is_include_pictures(using_tools))
now_node = new_tree_node
| if tool_output_status_code == ToolCallStatusCode.SUBMIT_AS_SUCCESS: | 9 | 2023-10-16 03:44:57+00:00 | 12k |
PKU-YuanGroup/Video-LLaVA | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if al... | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 9,446 | assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
self._attn_bias_initialized = True
if self.attn_impl == 'flash':
return (self.attn_bias, attention_mask)
if self.attn_bias is not None:
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor)
assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name'] | MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config) | 11 | 2023-10-23 05:43:54+00:00 | 12k |
deepseek-ai/DreamCraft3D | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n ... | import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import (
Updateable,
update_end_if_possible,
update_if_possible,
)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights, find_last_path
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 9,660 |
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_train_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.train_dataloader.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
def on_validation_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.val_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_train_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.train_dataloader.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
def on_validation_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.val_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram | cleanup() | 9 | 2023-10-23 07:40:20+00:00 | 12k |
YORG-AI/Open-Assistant | package/src/yorgassistant/core/assistant/async_threads.py | [
{
"identifier": "Assistants",
"path": "package/src/yorgassistant/core/assistant/assistant.py",
"snippet": "class Assistants():\n def __init__(self, config,yaml_path:Optional[str] = None):\n self.config = config\n YamlPathConfig.assistants_yaml_path = yaml_path if yaml_path else 'assista... | import uuid
import time
import yaml
import os
import re
import logging
import json
import inspect
from typing import Any, List, Optional,Dict
from .assistant import Assistants
from ..nodes.openai.openai import OpenAINode,AsyncOpenAINode
from ..nodes.openai.openai_model import *
from .tools.tools import Tools, Tool
from .config import *
from .prompt.few_shot_cot_tools_choose_prompt import *
from .prompt.parameters_generate_prompt import *
from .prompt.response_generate_prompt import * | 7,362 |
class AsyncThreads:
current_tool: Tool
chat_node: OpenAINode # Threads 全局的 OpenAI node,仅用于 chat 交互以及对 tool 执行结果的分析(选择 tool 以及生成参数不使用该 node)
def __init__(self, config: ThreadsConfig,threads_yaml_path:Optional[str] = None):
self._config = config
self.current_tool = None
YamlPathConfig.threads_yaml_path = threads_yaml_path if threads_yaml_path else "threads.yaml"
@property
def config(self):
return self._config
@property
def id(self):
return self._config.id
def set_threads_yaml_path(yaml_path:str):
# 检查 yaml_path 是否为绝对路径
if not os.path.isabs(yaml_path):
# 获取调用此方法的栈帧
stack = inspect.stack()
caller_frame = stack[1]
# 获取调用者的文件路径
caller_path = caller_frame.filename
# 获取调用者的目录路径
caller_dir = os.path.dirname(caller_path)
# 构建 yaml 文件的绝对路径
full_yaml_path = os.path.join(caller_dir, yaml_path)
else:
full_yaml_path = yaml_path
# 获取 yaml 文件所在的目录
yaml_dir = os.path.dirname(full_yaml_path)
# 如果目录不存在,则创建它
os.makedirs(yaml_dir, exist_ok=True)
# 设置 yaml_path
YamlPathConfig.threads_yaml_path = full_yaml_path
async def save_to_yaml(self):
# 构建 threads.yaml 文件的绝对路径
threads_yaml_path = YamlPathConfig.threads_yaml_path
# 检查文件是否存在,如果不存在,则创建一个空的yaml文件
if not os.path.exists(threads_yaml_path):
with open(threads_yaml_path, 'w') as file:
file.write('') # 创建一个空文件
# 使用绝对路径打开 threads.yaml 文件
with open(threads_yaml_path, "r") as file:
data = yaml.safe_load(file) or []
# 查找具有相同 id 的 assistant
for i, d in enumerate(data):
if d["id"] == self.config.id:
# 如果找到了,就更新它
data[i] = self.config.to_dict()
break
else:
# 如果没有找到,就添加新的 assistant 到列表中
data.append(self.config.to_dict())
# 写回 YAML 文件
with open(threads_yaml_path, "w") as file:
yaml.dump(data, file)
@staticmethod
def create(yaml_file_path:str) -> "AsyncThreads":
# 创建 ThreadsConfig 对象
config = ThreadsConfig(
id=str(uuid.uuid4()),
object="AsyncThreads",
created_at=int(time.time()),
message_history=[],
metadata={},
)
# 创建 Threads 对象
threads = AsyncThreads(config,YamlPathConfig.threads_yaml_path)
# 保存到 YAML 文件
threads.save_to_yaml()
return threads
@classmethod
def from_id(cls, id: str) -> 'AsyncThreads':
# 使用传入的 yaml_path 参数打开 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 查找具有相同 id 的配置
for d in data:
if d['id'] == id:
# 如果找到了,就用这个配置创建一个新的对象
config = ThreadsConfig.from_dict(d)
return cls(config, YamlPathConfig.threads_yaml_path) # 使用传入的 yaml_path 创建 实例
# 如果没有找到,就抛出一个异常
raise ValueError(f'No threads with id {id} found in YAML file.')
@staticmethod
def get_all_threads() -> List[Dict[str, Any]]:
"""
读取 YAML 文件并返回所有 threads 的信息列表。
"""
# 确保 YAML 文件路径已经被设置
if YamlPathConfig.threads_yaml_path:
if not os.path.isfile(YamlPathConfig.threads_yaml_path):
# 如果文件路径存在但文件不存在,则创建一个空文件
with open(YamlPathConfig.threads_yaml_path, 'w') as file:
yaml.dump([], file)
else:
raise FileNotFoundError("The threads YAML file path is not set.")
# 读取 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 使用 from_dict 方法将每个字典转换为 ThreadsConfig 实例
threads_list = []
for item in data:
config = ThreadsConfig.from_dict(item)
threads_list.append(config)
return threads_list
async def run(self, assistant_id: str, input_text: str, **kwargs):
try:
# 使用 from_id 方法获取助手
assistant = Assistants.from_id(assistant_id)
tools_list = assistant.get_tools_type_list()
# 初始化 Tools 对象
|
def extract_bracket_content(s: str) -> list:
content = re.findall(r"\[(.*?)\]", s)
content = [c.replace("'", "") for c in content]
content = filter(lambda x: x != "", content)
ret = []
for item in content:
if "," in item:
ret.extend(item.split(","))
else:
ret.append(item)
return ret
class AsyncThreads:
current_tool: Tool
chat_node: OpenAINode # Threads 全局的 OpenAI node,仅用于 chat 交互以及对 tool 执行结果的分析(选择 tool 以及生成参数不使用该 node)
def __init__(self, config: ThreadsConfig,threads_yaml_path:Optional[str] = None):
self._config = config
self.current_tool = None
YamlPathConfig.threads_yaml_path = threads_yaml_path if threads_yaml_path else "threads.yaml"
@property
def config(self):
return self._config
@property
def id(self):
return self._config.id
def set_threads_yaml_path(yaml_path:str):
# 检查 yaml_path 是否为绝对路径
if not os.path.isabs(yaml_path):
# 获取调用此方法的栈帧
stack = inspect.stack()
caller_frame = stack[1]
# 获取调用者的文件路径
caller_path = caller_frame.filename
# 获取调用者的目录路径
caller_dir = os.path.dirname(caller_path)
# 构建 yaml 文件的绝对路径
full_yaml_path = os.path.join(caller_dir, yaml_path)
else:
full_yaml_path = yaml_path
# 获取 yaml 文件所在的目录
yaml_dir = os.path.dirname(full_yaml_path)
# 如果目录不存在,则创建它
os.makedirs(yaml_dir, exist_ok=True)
# 设置 yaml_path
YamlPathConfig.threads_yaml_path = full_yaml_path
async def save_to_yaml(self):
# 构建 threads.yaml 文件的绝对路径
threads_yaml_path = YamlPathConfig.threads_yaml_path
# 检查文件是否存在,如果不存在,则创建一个空的yaml文件
if not os.path.exists(threads_yaml_path):
with open(threads_yaml_path, 'w') as file:
file.write('') # 创建一个空文件
# 使用绝对路径打开 threads.yaml 文件
with open(threads_yaml_path, "r") as file:
data = yaml.safe_load(file) or []
# 查找具有相同 id 的 assistant
for i, d in enumerate(data):
if d["id"] == self.config.id:
# 如果找到了,就更新它
data[i] = self.config.to_dict()
break
else:
# 如果没有找到,就添加新的 assistant 到列表中
data.append(self.config.to_dict())
# 写回 YAML 文件
with open(threads_yaml_path, "w") as file:
yaml.dump(data, file)
@staticmethod
def create(yaml_file_path:str) -> "AsyncThreads":
# 创建 ThreadsConfig 对象
config = ThreadsConfig(
id=str(uuid.uuid4()),
object="AsyncThreads",
created_at=int(time.time()),
message_history=[],
metadata={},
)
# 创建 Threads 对象
threads = AsyncThreads(config,YamlPathConfig.threads_yaml_path)
# 保存到 YAML 文件
threads.save_to_yaml()
return threads
@classmethod
def from_id(cls, id: str) -> 'AsyncThreads':
# 使用传入的 yaml_path 参数打开 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 查找具有相同 id 的配置
for d in data:
if d['id'] == id:
# 如果找到了,就用这个配置创建一个新的对象
config = ThreadsConfig.from_dict(d)
return cls(config, YamlPathConfig.threads_yaml_path) # 使用传入的 yaml_path 创建 实例
# 如果没有找到,就抛出一个异常
raise ValueError(f'No threads with id {id} found in YAML file.')
@staticmethod
def get_all_threads() -> List[Dict[str, Any]]:
"""
读取 YAML 文件并返回所有 threads 的信息列表。
"""
# 确保 YAML 文件路径已经被设置
if YamlPathConfig.threads_yaml_path:
if not os.path.isfile(YamlPathConfig.threads_yaml_path):
# 如果文件路径存在但文件不存在,则创建一个空文件
with open(YamlPathConfig.threads_yaml_path, 'w') as file:
yaml.dump([], file)
else:
raise FileNotFoundError("The threads YAML file path is not set.")
# 读取 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 使用 from_dict 方法将每个字典转换为 ThreadsConfig 实例
threads_list = []
for item in data:
config = ThreadsConfig.from_dict(item)
threads_list.append(config)
return threads_list
async def run(self, assistant_id: str, input_text: str, **kwargs):
try:
# 使用 from_id 方法获取助手
assistant = Assistants.from_id(assistant_id)
tools_list = assistant.get_tools_type_list()
# 初始化 Tools 对象 | tools = Tools() | 3 | 2023-10-24 15:15:48+00:00 | 12k |
zju3dv/4K4D | scripts/renbody/warp_gaussian_with_smpl.py | [
{
"identifier": "dotdict",
"path": "easyvolcap/utils/base_utils.py",
"snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary tha... | import os
import argparse
import torch
import numpy as np
import torch.nn.functional as F
import sys
from glob import glob
from os.path import join
from tqdm import tqdm
from easymocap.bodymodel.smpl import SMPLModel
from easymocap.bodymodel.lbs import batch_rodrigues
from easyvolcap.utils.sh_utils import *
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.data_utils import load_mesh, load_dotdict, to_tensor
from easyvolcap.utils.net_utils import load_network
from easyvolcap.utils.gaussian_utils import GaussianModel
from easyvolcap.utils.easy_utils import load_bodymodel
from easyvolcap.utils.blend_utils import world_points_to_pose_points, pose_points_to_world_points
from easyvolcap.utils.sample_utils import sample_blend_K_closest_points
from pytorch3d.structures import Meshes, Pointclouds
from pytorch3d.transforms import matrix_to_quaternion, quaternion_to_matrix
from pytorch3d.ops import knn_points
from easyvolcap.scripts.main import test # will do everything a normal user would do
from easyvolcap.engine import cfg | 9,589 |
def get_transform(weights, A, eps=torch.finfo(torch.float32).eps, inverse=False):
"""
weights: B, N, J
A: B, J, D, D
"""
T = torch.einsum('bpn,bnij->bpij', weights, A)
dim = T.shape[-1]
if inverse:
T = (T + eps * torch.eye(dim, device=T.device, dtype=T.dtype)[None, None]).inverse()
return T
def transform(xyz, T):
xyz = F.pad(xyz, (0, 1), value=1.0)
xyz = torch.einsum("bpij,bpj->bpi", T, xyz)[..., :3]
return xyz
def load_pcd(path, sh_deg, smpl, prefix='sampler.pcds.0.', freeze=True, norm_with_smpl=True):
pcd = GaussianModel(torch.rand(1, 3), None, 0.1, sh_deg)
load_network(pcd, path, prefix=prefix)
if norm_with_smpl:
Rh = smpl['Rh']
Th = smpl['Th']
pcd._xyz.data = world_points_to_pose_points(pcd._xyz[None], Rh, Th)[0]
pcd._xyz.grad = None
R = quaternion_to_matrix(pcd._rotation)
R = Rh[0].mT @ R
pcd._rotation.data = matrix_to_quaternion(R)
pcd._rotation.grad = None
if freeze:
for params in pcd.parameters():
params.requires_grad = False
assert pcd.active_sh_degree.item() == sh_deg
return pcd
def load_smpl(path):
smpl = to_tensor(load_dotdict(path))
smpl = dotdict({
'shapes': smpl.shapes[:1],
'poses': smpl.poses[:1],
'Rh': batch_rodrigues(smpl.Rh[:1]),
'Th': smpl.Th[:1],
})
return smpl
def compute_lbs(pcd: GaussianModel, smpl: dotdict, bodymodel: SMPLModel, K=4):
xyz = pcd.get_xyz
smpl_verts = bodymodel(shapes=smpl['shapes'], poses=smpl['poses'])
|
def get_transform(weights, A, eps=torch.finfo(torch.float32).eps, inverse=False):
"""
weights: B, N, J
A: B, J, D, D
"""
T = torch.einsum('bpn,bnij->bpij', weights, A)
dim = T.shape[-1]
if inverse:
T = (T + eps * torch.eye(dim, device=T.device, dtype=T.dtype)[None, None]).inverse()
return T
def transform(xyz, T):
xyz = F.pad(xyz, (0, 1), value=1.0)
xyz = torch.einsum("bpij,bpj->bpi", T, xyz)[..., :3]
return xyz
def load_pcd(path, sh_deg, smpl, prefix='sampler.pcds.0.', freeze=True, norm_with_smpl=True):
pcd = GaussianModel(torch.rand(1, 3), None, 0.1, sh_deg)
load_network(pcd, path, prefix=prefix)
if norm_with_smpl:
Rh = smpl['Rh']
Th = smpl['Th']
pcd._xyz.data = world_points_to_pose_points(pcd._xyz[None], Rh, Th)[0]
pcd._xyz.grad = None
R = quaternion_to_matrix(pcd._rotation)
R = Rh[0].mT @ R
pcd._rotation.data = matrix_to_quaternion(R)
pcd._rotation.grad = None
if freeze:
for params in pcd.parameters():
params.requires_grad = False
assert pcd.active_sh_degree.item() == sh_deg
return pcd
def load_smpl(path):
smpl = to_tensor(load_dotdict(path))
smpl = dotdict({
'shapes': smpl.shapes[:1],
'poses': smpl.poses[:1],
'Rh': batch_rodrigues(smpl.Rh[:1]),
'Th': smpl.Th[:1],
})
return smpl
def compute_lbs(pcd: GaussianModel, smpl: dotdict, bodymodel: SMPLModel, K=4):
xyz = pcd.get_xyz
smpl_verts = bodymodel(shapes=smpl['shapes'], poses=smpl['poses']) | weights, dists = sample_blend_K_closest_points(xyz[None], smpl_verts, bodymodel.weights[None], K=K) | 9 | 2023-10-17 04:48:46+00:00 | 12k |
codefuse-ai/Test-Agent | chat/model/model_adapter.py | [
{
"identifier": "CPU_ISA",
"path": "chat/constants.py",
"snippet": "CPU_ISA = os.getenv(\"CPU_ISA\")"
},
{
"identifier": "GptqConfig",
"path": "chat/modules/gptq.py",
"snippet": "class GptqConfig:\n ckpt: str = field(\n default=None,\n metadata={\n \"help\": \... | import math
import os
import sys
import warnings
import accelerate
import psutil
import torch
import intel_extension_for_pytorch as ipex
import intel_extension_for_pytorch as ipex
from typing import Dict, List, Optional
from functools import cache
from functools import lru_cache as cache
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
LlamaTokenizer,
LlamaForCausalLM,
T5Tokenizer,
)
from chat.constants import CPU_ISA
from chat.modules.gptq import GptqConfig, load_gptq_quantized
from chat.modules.awq import AWQConfig, load_awq_quantized
from chat.conversation import Conversation, get_conv_template
from chat.model.compression import load_compress_model
from chat.model.llama_condense_monkey_patch import (
replace_llama_with_condense,
)
from chat.model.model_chatglm import generate_stream_chatglm
from chat.model.model_codet5p import generate_stream_codet5p
from chat.model.model_falcon import generate_stream_falcon
from chat.model.monkey_patch_non_inplace import (
replace_llama_attn_with_non_inplace_operations,
)
from chat.utils import get_gpu_memory
from transformers import BitsAndBytesConfig
from chat.server.inference import generate_stream
from peft import PeftConfig, PeftModel
from peft import PeftConfig, PeftModel
from chat.model.rwkv_model import RwkvModel
from transformers.generation import GenerationConfig | 8,642 | model = AutoModel.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
return model, tokenizer
def load_compress_model(self, model_path, device, torch_dtype, revision="main"):
return load_compress_model(
model_path,
device,
torch_dtype,
use_fast=self.use_fast_tokenizer,
revision=revision,
)
def get_default_conv_template(self, model_path: str) -> Conversation:
return get_conv_template("one_shot")
# A global registry for all model adapters
# TODO (lmzheng): make it a priority queue.
model_adapters: List[BaseModelAdapter] = []
def register_model_adapter(cls):
"""Register a model adapter."""
model_adapters.append(cls())
@cache
def get_model_adapter(model_path: str) -> BaseModelAdapter:
"""Get a model adapter for a model_path."""
model_path_basename = os.path.basename(os.path.normpath(model_path))
# Try the basename of model_path at first
for adapter in model_adapters:
if adapter.match(model_path_basename) and type(adapter) != BaseModelAdapter:
return adapter
# Then try the full path
for adapter in model_adapters:
if adapter.match(model_path):
return adapter
raise ValueError(f"No valid model adapter for {model_path}")
def raise_warning_for_incompatible_cpu_offloading_configuration(
device: str, load_8bit: bool, cpu_offloading: bool
):
if cpu_offloading:
if not load_8bit:
warnings.warn(
"The cpu-offloading feature can only be used while also using 8-bit-quantization.\n"
"Use '--load-8bit' to enable 8-bit-quantization\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if not "linux" in sys.platform:
warnings.warn(
"CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if device != "cuda":
warnings.warn(
"CPU-offloading is only enabled when using CUDA-devices\n"
"Continuing without cpu-offloading enabled\n"
)
return False
return cpu_offloading
def load_model(
model_path: str,
device: str = "cuda",
num_gpus: int = 1,
max_gpu_memory: Optional[str] = None,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: Optional[GptqConfig] = None,
awq_config: Optional[AWQConfig] = None,
revision: str = "main",
debug: bool = False,
):
"""Load a model from Hugging Face."""
# get model adapter
adapter = get_model_adapter(model_path)
# Handle device mapping
cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration(
device, load_8bit, cpu_offloading
)
if device == "cpu":
kwargs = {"torch_dtype": torch.float32}
if CPU_ISA in ["avx512_bf16", "amx"]:
try:
kwargs = {"torch_dtype": torch.bfloat16}
except ImportError:
warnings.warn(
"Intel Extension for PyTorch is not installed, it can be installed to accelerate cpu inference"
)
elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if num_gpus != 1:
kwargs["device_map"] = "auto"
if max_gpu_memory is None:
kwargs[
"device_map"
] = "sequential" # This is important for not the same VRAM sizes
available_gpu_memory = get_gpu_memory(num_gpus)
kwargs["max_memory"] = {
i: str(int(available_gpu_memory[i] * 0.85)) + "GiB"
for i in range(num_gpus)
}
else:
kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
elif device == "mps":
kwargs = {"torch_dtype": torch.float16}
# Avoid bugs in mps backend by not using in-place operations.
| """Model adapter registration."""
if sys.version_info >= (3, 9):
else:
# Check an environment variable to check if we should be sharing Peft model
# weights. When false we treat all Peft models as separate.
peft_share_base_weights = (
os.environ.get("PEFT_SHARE_BASE_WEIGHTS", "false").lower() == "true"
)
class BaseModelAdapter:
"""The base and the default model adapter."""
use_fast_tokenizer = True
def match(self, model_path: str):
return True
def load_model(self, model_path: str, from_pretrained_kwargs: dict):
revision = from_pretrained_kwargs.get("revision", "main")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_path,
use_fast=self.use_fast_tokenizer,
revision=revision,
trust_remote_code=True,
)
except TypeError:
tokenizer = AutoTokenizer.from_pretrained(
model_path, use_fast=False, revision=revision, trust_remote_code=True
)
try:
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
except NameError:
model = AutoModel.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
return model, tokenizer
def load_compress_model(self, model_path, device, torch_dtype, revision="main"):
return load_compress_model(
model_path,
device,
torch_dtype,
use_fast=self.use_fast_tokenizer,
revision=revision,
)
def get_default_conv_template(self, model_path: str) -> Conversation:
return get_conv_template("one_shot")
# A global registry for all model adapters
# TODO (lmzheng): make it a priority queue.
model_adapters: List[BaseModelAdapter] = []
def register_model_adapter(cls):
"""Register a model adapter."""
model_adapters.append(cls())
@cache
def get_model_adapter(model_path: str) -> BaseModelAdapter:
"""Get a model adapter for a model_path."""
model_path_basename = os.path.basename(os.path.normpath(model_path))
# Try the basename of model_path at first
for adapter in model_adapters:
if adapter.match(model_path_basename) and type(adapter) != BaseModelAdapter:
return adapter
# Then try the full path
for adapter in model_adapters:
if adapter.match(model_path):
return adapter
raise ValueError(f"No valid model adapter for {model_path}")
def raise_warning_for_incompatible_cpu_offloading_configuration(
device: str, load_8bit: bool, cpu_offloading: bool
):
if cpu_offloading:
if not load_8bit:
warnings.warn(
"The cpu-offloading feature can only be used while also using 8-bit-quantization.\n"
"Use '--load-8bit' to enable 8-bit-quantization\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if not "linux" in sys.platform:
warnings.warn(
"CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if device != "cuda":
warnings.warn(
"CPU-offloading is only enabled when using CUDA-devices\n"
"Continuing without cpu-offloading enabled\n"
)
return False
return cpu_offloading
def load_model(
model_path: str,
device: str = "cuda",
num_gpus: int = 1,
max_gpu_memory: Optional[str] = None,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: Optional[GptqConfig] = None,
awq_config: Optional[AWQConfig] = None,
revision: str = "main",
debug: bool = False,
):
"""Load a model from Hugging Face."""
# get model adapter
adapter = get_model_adapter(model_path)
# Handle device mapping
cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration(
device, load_8bit, cpu_offloading
)
if device == "cpu":
kwargs = {"torch_dtype": torch.float32}
if CPU_ISA in ["avx512_bf16", "amx"]:
try:
kwargs = {"torch_dtype": torch.bfloat16}
except ImportError:
warnings.warn(
"Intel Extension for PyTorch is not installed, it can be installed to accelerate cpu inference"
)
elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if num_gpus != 1:
kwargs["device_map"] = "auto"
if max_gpu_memory is None:
kwargs[
"device_map"
] = "sequential" # This is important for not the same VRAM sizes
available_gpu_memory = get_gpu_memory(num_gpus)
kwargs["max_memory"] = {
i: str(int(available_gpu_memory[i] * 0.85)) + "GiB"
for i in range(num_gpus)
}
else:
kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
elif device == "mps":
kwargs = {"torch_dtype": torch.float16}
# Avoid bugs in mps backend by not using in-place operations. | replace_llama_attn_with_non_inplace_operations() | 12 | 2023-10-20 08:56:20+00:00 | 12k |
thuml/iTransformer | run.py | [
{
"identifier": "Exp_Long_Term_Forecast",
"path": "experiments/exp_long_term_forecasting.py",
"snippet": "class Exp_Long_Term_Forecast(Exp_Basic):\n def __init__(self, args):\n super(Exp_Long_Term_Forecast, self).__init__(args)\n\n def _build_model(self):\n model = self.model_dict[se... | import argparse
import torch
import random
import numpy as np
from experiments.exp_long_term_forecasting import Exp_Long_Term_Forecast
from experiments.exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial | 9,456 |
if __name__ == '__main__':
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
parser = argparse.ArgumentParser(description='iTransformer')
# basic config
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model', type=str, required=True, default='iTransformer',
help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')
# data loader
parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument('--label_len', type=int, default=48, help='start token length') # no longer needed in inverted Transformers
parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size') # applicable on arbitrary number of variates in inverted Transformers
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=1, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
# iTransformer
parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
help='experiemnt name, options:[MTSF, partial_train]')
parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)') # See Figure 8 of our paper for the detail
parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, '
'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
args.devices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
if args.exp_name == 'partial_train': # See Figure 8 of our paper, for the detail
|
if __name__ == '__main__':
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
parser = argparse.ArgumentParser(description='iTransformer')
# basic config
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model', type=str, required=True, default='iTransformer',
help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')
# data loader
parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument('--label_len', type=int, default=48, help='start token length') # no longer needed in inverted Transformers
parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size') # applicable on arbitrary number of variates in inverted Transformers
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=1, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
# iTransformer
parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
help='experiemnt name, options:[MTSF, partial_train]')
parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)') # See Figure 8 of our paper for the detail
parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, '
'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
args.devices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
if args.exp_name == 'partial_train': # See Figure 8 of our paper, for the detail | Exp = Exp_Long_Term_Forecast_Partial | 1 | 2023-10-19 03:23:15+00:00 | 12k |
kylesargent/ZeroNVS | threestudio/models/geometry/base.py | [
{
"identifier": "IsosurfaceHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class IsosurfaceHelper(nn.Module):\n points_range: Tuple[float, float] = (0, 1)\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N 3\"]:\n raise NotImplementedError"
},
{
"identi... | from dataclasses import dataclass, field
from threestudio.models.isosurface import (
IsosurfaceHelper,
MarchingCubeCPUHelper,
MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 7,648 |
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
# import pdb
# pdb.set_trace()
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x = x.clone()
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = True
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
|
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
# import pdb
# pdb.set_trace()
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x = x.clone()
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = True
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu": | self.isosurface_helper = MarchingCubeCPUHelper( | 1 | 2023-10-24 19:02:44+00:00 | 12k |
princeton-nlp/LLM-Shearing | llmshearing/models/composer_pythia.py | [
{
"identifier": "L0Module",
"path": "llmshearing/models/l0_module.py",
"snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.b... | import math
import torch
import torch.nn as nn
from typing import List, Optional, Tuple
from einops import rearrange
from omegaconf import DictConfig
from torch.nn import functional as F
from transformers.pytorch_utils import (find_pruneable_heads_and_indices,
prune_linear_layer)
from llmshearing.models.l0_module import L0Module
from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn
from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb | 9,275 | input, self.normalized_shape, self.weight, self.bias, self.eps)
return output
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
# self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
self.normalized_shape = (len(remaining_index),)
class PythiaEmbedding(nn.Embedding):
def forward(self, input, hidden_z=None):
embeddings = super().forward(input)
if hidden_z is not None:
embeddings = embeddings.mul(hidden_z)
return embeddings
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
self.weight.data = self.weight.data.mul(hidden_z)
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone())
self.embedding_dim = len(remaining_index)
print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}")
class PythiaModel(nn.Module):
def __init__(self, cfg: DictConfig):
super().__init__()
print(f'Tried to build Pythia model with cfg.name={cfg.name}')
self.cfg = cfg
### added ###
self.l0_module = None
if getattr(self.cfg, "l0_module", None) is not None:
self.l0_module = L0Module(self.cfg, device=cfg.init_device)
#############
layernorm_class = CoFiLayerNorm
self.attn_impl = cfg.attn_impl
self.embedding_fraction = cfg.get('embedding_fraction', 1)
assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'
self.transformer = nn.ModuleDict({
"wte": PythiaEmbedding(cfg.vocab_size,
cfg.d_model,
device=cfg.init_device),
})
self.transformer.update({
'blocks':
nn.ModuleList([
PythiaBlock(cfg, device=cfg.init_device)
for _ in range(cfg.n_layers)
])
})
self.transformer.update({
"output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False),
})
self.transformer.update({
"ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config
})
self.is_causal = True
if cfg.get('verbose') and cfg.get('verbose') > 2:
print(self)
def prune_params(self, zs=None):
# TODO
if zs is None:
self.l0_module.eval()
zs = self.l0_module(calculate_lagrangian=False)
# wte as well :)
# ln_f if hidden states are to be pruned
if "hidden_z" in zs:
hidden_z = zs["hidden_z"]
remaining_index = torch.where(~hidden_z.eq(0))[0]
self.transformer.ln_f.prune_params(hidden_z)
self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z)
self.transformer.wte.weight = torch.nn.parameter.Parameter(
self.transformer.wte.weight.index_select(1, remaining_index).clone())
self.transformer.wte.embedding_dim = len(remaining_index)
# self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z)
half = self.transformer.output.weight.data.dtype == torch.float16
self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1)
if half:
self.transformer.output = self.transformer.output.half()
for i, block in enumerate(self.transformer.blocks):
zs_block = self.get_zs_block(zs, i)
block.prune_params(zs_block)
def get_zs_block(self, zs, block_idx):
zs_block = {}
if zs is not None:
for key in zs:
if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"]
else: zs_block[key] = zs[key][block_idx]
return zs_block
def forward(
self,
input_ids: torch.LongTensor,
key_padding_mask: Optional[torch.ByteTensor] = None,
past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
pruned_steps: int = 0,
retain_grad: bool = False,
**zs,):
S = input_ids.size(1)
assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!"
tok_emb = self.transformer.wte(input_ids)
if "hidden_z" in zs:
tok_emb = tok_emb.mul(zs["hidden_z"])
x = tok_emb
attn_bias = None # only consider the flash attention case
|
class ComposerMosaicPythia(ComposerMosaicLlama):
def __init__(self, cfg):
super().__init__(cfg)
self.model = PythiaModel(cfg)
class CoFiLayerNorm(torch.nn.LayerNorm):
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None:
super().__init__(normalized_shape, eps, elementwise_affine, device)
def forward(self, input, hidden_z=None):
if hidden_z is not None:
remaining_index = torch.where(~hidden_z.eq(0))[0]
compressed_input = torch.index_select(
input, dim=-1, index=remaining_index)
compressed_weight = self.weight[remaining_index]
compressed_bias = self.bias[remaining_index]
normalized_shape = len(remaining_index)
normed_input = F.layer_norm(
compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps)
output = input.clone()
normed_input = normed_input.to(output.dtype)
output[..., remaining_index] = normed_input
else:
output = F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
return output
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
# self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
self.normalized_shape = (len(remaining_index),)
class PythiaEmbedding(nn.Embedding):
def forward(self, input, hidden_z=None):
embeddings = super().forward(input)
if hidden_z is not None:
embeddings = embeddings.mul(hidden_z)
return embeddings
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
self.weight.data = self.weight.data.mul(hidden_z)
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone())
self.embedding_dim = len(remaining_index)
print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}")
class PythiaModel(nn.Module):
def __init__(self, cfg: DictConfig):
super().__init__()
print(f'Tried to build Pythia model with cfg.name={cfg.name}')
self.cfg = cfg
### added ###
self.l0_module = None
if getattr(self.cfg, "l0_module", None) is not None:
self.l0_module = L0Module(self.cfg, device=cfg.init_device)
#############
layernorm_class = CoFiLayerNorm
self.attn_impl = cfg.attn_impl
self.embedding_fraction = cfg.get('embedding_fraction', 1)
assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'
self.transformer = nn.ModuleDict({
"wte": PythiaEmbedding(cfg.vocab_size,
cfg.d_model,
device=cfg.init_device),
})
self.transformer.update({
'blocks':
nn.ModuleList([
PythiaBlock(cfg, device=cfg.init_device)
for _ in range(cfg.n_layers)
])
})
self.transformer.update({
"output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False),
})
self.transformer.update({
"ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config
})
self.is_causal = True
if cfg.get('verbose') and cfg.get('verbose') > 2:
print(self)
def prune_params(self, zs=None):
# TODO
if zs is None:
self.l0_module.eval()
zs = self.l0_module(calculate_lagrangian=False)
# wte as well :)
# ln_f if hidden states are to be pruned
if "hidden_z" in zs:
hidden_z = zs["hidden_z"]
remaining_index = torch.where(~hidden_z.eq(0))[0]
self.transformer.ln_f.prune_params(hidden_z)
self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z)
self.transformer.wte.weight = torch.nn.parameter.Parameter(
self.transformer.wte.weight.index_select(1, remaining_index).clone())
self.transformer.wte.embedding_dim = len(remaining_index)
# self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z)
half = self.transformer.output.weight.data.dtype == torch.float16
self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1)
if half:
self.transformer.output = self.transformer.output.half()
for i, block in enumerate(self.transformer.blocks):
zs_block = self.get_zs_block(zs, i)
block.prune_params(zs_block)
def get_zs_block(self, zs, block_idx):
zs_block = {}
if zs is not None:
for key in zs:
if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"]
else: zs_block[key] = zs[key][block_idx]
return zs_block
def forward(
self,
input_ids: torch.LongTensor,
key_padding_mask: Optional[torch.ByteTensor] = None,
past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
pruned_steps: int = 0,
retain_grad: bool = False,
**zs,):
S = input_ids.size(1)
assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!"
tok_emb = self.transformer.wte(input_ids)
if "hidden_z" in zs:
tok_emb = tok_emb.mul(zs["hidden_z"])
x = tok_emb
attn_bias = None # only consider the flash attention case | attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb) | 2 | 2023-10-16 12:26:08+00:00 | 12k |
hkchengrex/Cutie | cutie/inference/inference_core.py | [
{
"identifier": "MemoryManager",
"path": "cutie/inference/memory_manager.py",
"snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n ... | from typing import List, Optional, Iterable, Dict
from omegaconf import DictConfig
from cutie.inference.memory_manager import MemoryManager
from cutie.inference.object_manager import ObjectManager
from cutie.inference.image_feature_store import ImageFeatureStore
from cutie.model.cutie import CUTIE
from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate
import logging
import numpy as np
import torch
import torch.nn.functional as F | 9,330 |
log = logging.getLogger()
class InferenceCore:
def __init__(self,
network: CUTIE,
cfg: DictConfig,
*,
image_feature_store: ImageFeatureStore = None):
self.network = network
self.cfg = cfg
self.mem_every = cfg.mem_every
stagger_updates = cfg.stagger_updates
self.chunk_size = cfg.chunk_size
self.save_aux = cfg.save_aux
self.max_internal_size = cfg.max_internal_size
self.flip_aug = cfg.flip_aug
self.curr_ti = -1
self.last_mem_ti = 0
# at which time indices should we update the sensory memory
if stagger_updates >= self.mem_every:
self.stagger_ti = set(range(1, self.mem_every + 1))
else:
self.stagger_ti = set(
np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int))
self.object_manager = ObjectManager()
|
log = logging.getLogger()
class InferenceCore:
def __init__(self,
network: CUTIE,
cfg: DictConfig,
*,
image_feature_store: ImageFeatureStore = None):
self.network = network
self.cfg = cfg
self.mem_every = cfg.mem_every
stagger_updates = cfg.stagger_updates
self.chunk_size = cfg.chunk_size
self.save_aux = cfg.save_aux
self.max_internal_size = cfg.max_internal_size
self.flip_aug = cfg.flip_aug
self.curr_ti = -1
self.last_mem_ti = 0
# at which time indices should we update the sensory memory
if stagger_updates >= self.mem_every:
self.stagger_ti = set(range(1, self.mem_every + 1))
else:
self.stagger_ti = set(
np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int))
self.object_manager = ObjectManager() | self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager) | 0 | 2023-10-19 17:49:24+00:00 | 12k |
MolecularAI/REINVENT4 | tests/chemistry/library_design/test_fragment_reactions_slice_enumerator.py | [
{
"identifier": "Conversions",
"path": "reinvent/chemistry/conversions.py",
"snippet": "class Conversions:\n @staticmethod\n def smiles_to_mols_and_indices(query_smiles: List[str]) -> Tuple[List[Mol], List[int]]:\n mols = [MolFromSmiles(smile) for smile in query_smiles]\n valid_mask ... | import unittest
from reinvent.chemistry import Conversions
from reinvent.chemistry.library_design import (
FragmentReactionSliceEnumerator,
BondMaker,
AttachmentPoints,
)
from reinvent.chemistry.library_design.dtos import FilteringConditionDTO
from reinvent.chemistry.library_design.enums import MolecularDescriptorsEnum
from reinvent.chemistry.library_design.fragment_reactions import FragmentReactions
from tests.chemistry.library_design.fixtures import FRAGMENT_REACTION_SUZUKI, FRAGMENT_REACTIONS
from tests.chemistry.fixtures.test_data import CELECOXIB | 9,975 |
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase):
def setUp(self):
self.chemistry = Conversions()
self.reactions = FragmentReactions()
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks(
FRAGMENT_REACTION_SUZUKI
)
self.suzuki_positive_smile = CELECOXIB
self.suzuki_positive_molecule = self.chemistry.smile_to_mol(self.suzuki_positive_smile)
scaffold_conditions = []
decoration_conditions = []
|
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase):
def setUp(self):
self.chemistry = Conversions()
self.reactions = FragmentReactions()
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks(
FRAGMENT_REACTION_SUZUKI
)
self.suzuki_positive_smile = CELECOXIB
self.suzuki_positive_molecule = self.chemistry.smile_to_mol(self.suzuki_positive_smile)
scaffold_conditions = []
decoration_conditions = [] | self._slice_enumerator = FragmentReactionSliceEnumerator( | 3 | 2023-10-20 06:43:16+00:00 | 12k |
jhejna/cpl | research/algs/off_policy_algorithm.py | [
{
"identifier": "ReplayBuffer",
"path": "research/datasets/replay_buffer/buffer.py",
"snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functi... | import datetime
import functools
import os
import sys
import tempfile
import gym
import numpy as np
import torch
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
from research.datasets import ReplayBuffer
from research.datasets.replay_buffer import storage
from research.envs.base import EmptyEnv
from research.networks.base import ModuleContainer
from research.utils import runners, utils
from .base import Algorithm
from research.utils.config import Config | 9,788 |
class OffPolicyAlgorithm(Algorithm):
def __init__(
self,
*args,
offline_steps: int = 0, # Run fully offline by setting to -1
random_steps: int = 1000,
async_runner_ep_lag: int = 1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.offline_steps = offline_steps
self.random_steps = random_steps
self.async_runner_ep_lag = async_runner_ep_lag
def setup_datasets(self, env: gym.Env, total_steps: int):
super().setup_datasets(env, total_steps)
# Assign the correct update function based on what is passed in.
if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0:
self.env_step = self._empty_step
|
class OffPolicyAlgorithm(Algorithm):
def __init__(
self,
*args,
offline_steps: int = 0, # Run fully offline by setting to -1
random_steps: int = 1000,
async_runner_ep_lag: int = 1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.offline_steps = offline_steps
self.random_steps = random_steps
self.async_runner_ep_lag = async_runner_ep_lag
def setup_datasets(self, env: gym.Env, total_steps: int):
super().setup_datasets(env, total_steps)
# Assign the correct update function based on what is passed in.
if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0:
self.env_step = self._empty_step | elif isinstance(env, runners.AsyncEnv): | 4 | 2023-10-19 17:25:45+00:00 | 12k |
nbasyl/LLM-FP4 | configs/FPQ_baseline_config_llama.py | [
{
"identifier": "FPPTQSLBatchingQuantLinear_fpq_baseline",
"path": "quant_layers/fp_linear.py",
"snippet": "class FPPTQSLBatchingQuantLinear_fpq_baseline(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw... | from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq_baseline
from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline | 9,686 |
bit = 8
exp_bit = 4
embed_name_list = ["qembedding"]
fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"]
matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"]
w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}
ptqsl_embedding_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1
}
ptqsl_linear_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1,
"n_a": 1,
"bias_correction":True # Conventionally I'll not add an actual bias correction in linear
}
def get_module(module_type, *args, **kwargs):
if "embedding" in module_type:
kwargs.update(ptqsl_embedding_kwargs)
module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
elif "qlinear" in module_type:
kwargs.update(ptqsl_linear_kwargs)
if module_type == "qlinear_score":
kwargs["n_V"] = 1
|
bit = 8
exp_bit = 4
embed_name_list = ["qembedding"]
fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"]
matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"]
w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}
ptqsl_embedding_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1
}
ptqsl_linear_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1,
"n_a": 1,
"bias_correction":True # Conventionally I'll not add an actual bias correction in linear
}
def get_module(module_type, *args, **kwargs):
if "embedding" in module_type:
kwargs.update(ptqsl_embedding_kwargs)
module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
elif "qlinear" in module_type:
kwargs.update(ptqsl_linear_kwargs)
if module_type == "qlinear_score":
kwargs["n_V"] = 1 | module= FPPTQSLBatchingQuantLinear_fpq_baseline(*args,**kwargs,w_bit=w_bit[module_type],a_bit=a_bit[module_type],w_exponent_bit=w_exp_bit[module_type],a_exponent_bit=a_exp_bit[module_type]) | 0 | 2023-10-15 06:05:13+00:00 | 12k |
bcmi/libcom | libcom/controllable_composition/source/ControlCom/ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "libcom/controllable_composition/source/ControlCom/ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt ... | from genericpath import samefile
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from libcom.controllable_composition.source.ControlCom.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from libcom.controllable_composition.source.ControlCom.ldm.modules.ema import LitEma
from libcom.controllable_composition.source.ControlCom.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from libcom.controllable_composition.source.ControlCom.ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from libcom.controllable_composition.source.ControlCom.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from libcom.controllable_composition.source.ControlCom.ldm.models.diffusion.ddim import DDIMSampler
from torchvision.transforms import Resize, Normalize
from torch.autograd import Variable
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from PIL import Image
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import torch.nn.functional as F
import math
import time
import random
import os, torchvision
import shutil | 9,468 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
u_cond_percent=0,
):
super().__init__()
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
# print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size
self.channels = channels
self.u_cond_percent=u_cond_percent
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
# count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
# print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else:
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
u_cond_percent=0,
):
super().__init__()
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
# print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size
self.channels = channels
self.u_cond_percent=u_cond_percent
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
# count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
# print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else: | betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) | 14 | 2023-10-19 05:08:12+00:00 | 12k |
e4s2023/E4S2023 | img_recon.py | [
{
"identifier": "CelebAHQDataset",
"path": "datasets/dataset.py",
"snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=T... | from torch.utils.data import DataLoader
from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED
from models.networks import Net3
from options.test_options import TestOptions
from utils import torch_utils
from tqdm import tqdm
from PIL import Image
from options.swap_face_options import SwapFaceOptions
from swap_face_fine.face_parsing.face_parsing_demo import init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps
import torchvision.transforms as transforms
import glob
import os
import json
import sys
import pprint
import torch
import numpy as np | 7,957 | """
This file runs the main training/val loop
"""
sys.path.append(".")
sys.path.append("..")
# 重建一张/几张图片
@torch.no_grad()
def recon_imgs(opts, imgs_path, out_dir="./tmp"):
net = Net3(opts).eval().to(opts.device)
ckpt_dict=torch.load("/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/ablation_study/v_15_baseline_seg12_finetuneGD_8A100_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt")
net.latent_avg = ckpt_dict['latent_avg'].to(opts.device)
net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module."))
print("Load pre-trained weights.")
# face parsing 模型
faceParsing_ckpt = "./pretrained/faceseg/79999_iter.pth"
faceParsing_model = init_faceParsing_pretrained_model(faceParsing_ckpt)
for idx, img_path in enumerate(tqdm(imgs_path)):
img_pil = Image.open(img_path).convert("RGB")
sample_name = os.path.basename(img_path)[:-4]
mask = faceParsing_demo(faceParsing_model, img_pil, convert_to_seg12=True)
# wrap data
| """
This file runs the main training/val loop
"""
sys.path.append(".")
sys.path.append("..")
# 重建一张/几张图片
@torch.no_grad()
def recon_imgs(opts, imgs_path, out_dir="./tmp"):
net = Net3(opts).eval().to(opts.device)
ckpt_dict=torch.load("/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/ablation_study/v_15_baseline_seg12_finetuneGD_8A100_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt")
net.latent_avg = ckpt_dict['latent_avg'].to(opts.device)
net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module."))
print("Load pre-trained weights.")
# face parsing 模型
faceParsing_ckpt = "./pretrained/faceseg/79999_iter.pth"
faceParsing_model = init_faceParsing_pretrained_model(faceParsing_ckpt)
for idx, img_path in enumerate(tqdm(imgs_path)):
img_pil = Image.open(img_path).convert("RGB")
sample_name = os.path.basename(img_path)[:-4]
mask = faceParsing_demo(faceParsing_model, img_pil, convert_to_seg12=True)
# wrap data | img = transforms.Compose([TO_TENSOR, NORMALIZE])(img_pil) | 3 | 2023-10-15 12:15:01+00:00 | 12k |
sotopia-lab/sotopia | sotopia-chat/chat_server.py | [
{
"identifier": "redis_agent",
"path": "sotopia/agents/redis_agent.py",
"snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agen... | import asyncio
import logging
import os
import random
import subprocess
import redis.asyncio as redis
import typer
from asyncio import gather
from asyncio import run as aiorun
from datetime import datetime
from logging import FileHandler
from typing import Literal, cast
from rich.logging import RichHandler
from sotopia.agents import redis_agent
from sotopia.agents.llm_agent import LLMAgent
from sotopia.database import EnvAgentComboStorage
from sotopia.database.persistent_profile import (
AgentProfile,
EnvironmentList,
EnvironmentProfile,
)
from sotopia.envs.evaluators import (
ReachGoalLLMEvaluator,
RuleBasedTerminatedEvaluator,
)
from sotopia.envs.parallel import ParallelSotopiaEnv
from sotopia.server import arun_one_episode | 8,780 |
process = subprocess.Popen(
["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(
level=15,
format=FORMAT,
datefmt="[%X]",
handlers=[
RichHandler(),
FileHandler(
datetime.now().strftime(
f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
)
),
],
)
app = typer.Typer()
async def _start_server_with_two_session_ids_and_agent_env_combo(
session_ids: list[str], agent_env_combo_pk: str
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
|
process = subprocess.Popen(
["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(
level=15,
format=FORMAT,
datefmt="[%X]",
handlers=[
RichHandler(),
FileHandler(
datetime.now().strftime(
f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
)
),
],
)
app = typer.Typer()
async def _start_server_with_two_session_ids_and_agent_env_combo(
session_ids: list[str], agent_env_combo_pk: str
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk) | env = ParallelSotopiaEnv( | 8 | 2023-10-23 19:47:26+00:00 | 12k |
qualabs/video-headline | player/tests.py | [
{
"identifier": "EmbedView",
"path": "player/views.py",
"snippet": "class EmbedView(TemplateView):\n template_name = \"player/index.html\"\n\n def validate_domain(self, channel_allowed_domains, referer_domain):\n allowed_domains = settings.ALLOWED_DOMAINS + channel_allowed_domains\n\n ... | from django.test import TestCase, Client
from django.urls import reverse
from rest_framework import status
from player.views import EmbedView
from organization.models import Organization, Channel
from test_utils import create_organizations, create_user, create_channels, create_videos, \
add_channel_to_video, create_live_videos
from video.models import Media, LiveVideo | 8,230 | 'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_processing(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_processing_failed(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING_FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_not_finished(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.NOT_FINISHED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_failed(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_with_disabled_org(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
self.org1.upload_enable = False
self.org1.save()
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
self.org1.upload_enable = True
self.org1.save()
def test_error_message_live_video_state_starting(self):
live = \
|
INVALID_DOMAIN_MESSAGE = 'Content is not available on this site.'
UNAVAILABLE_MESSAGE = 'The content is not available.'
class PlayerTests(TestCase):
@classmethod
def setUpClass(cls):
# Organizations
cls.org1 = create_organizations('Organization', 1)[0]
# Users
cls.user1 = create_user('user1', '12345678', cls.org1)
def setUp(self):
# Channel with ads_vast_url
self.chan1 = \
create_channels('Channel with ads vast', self.org1, 1, [],
'http://www.channel-vast-url.com')[0]
# Channel with autoplay
self.chan2 = \
create_channels('Channel with autoplay', self.org1, 1, [], None, False, True)[0]
# Channel with allowed domains
self.chan3 = \
create_channels('Channel with all allowed domains', self.org1, 1, [])[0]
self.chan4 = \
create_channels('Channel with simple allowed domain', self.org1, 1,
['www.allowed-domain.com'])[0]
self.chan5 = \
create_channels('Channel with wildcard domain', self.org1, 1, ['www.*.test.com'])[0]
self.chan6 = \
create_channels('Channel with double wildcard domain', self.org1, 1,
['www.*.*.test.com'])[0]
self.chan7 = \
create_channels('Channel with common domain', self.org1, 1, ['*.domain.com'])[0]
# Video with default options
self.video1 = \
create_videos('Video', self.user1, self.org1, 1, Media.State.FINISHED)[0]
add_channel_to_video(self.chan1, self.video1)
# Video with ads_vast_url and without enabled ads
self.video2 = \
create_videos('Video with ads vast and without enable ads', self.user1, self.org1, 1,
Media.State.FINISHED, None, 'http://www.video-vast-url.com', False)[0]
add_channel_to_video(self.chan1, self.video2)
# Video with ads_vast_url
self.video3 = \
create_videos('Video with ads vast', self.user1, self.org1, 1, Media.State.FINISHED,
None, 'http://www.video-vast-url.com')[0]
add_channel_to_video(self.chan1, self.video3)
# Video without ads_vast_url and with enable_ads false
self.video4 = \
create_videos('Video without ads vast and with enable ads', self.user1, self.org1, 1,
Media.State.FINISHED, None,
None, False)[0]
add_channel_to_video(self.chan1, self.video4)
# Videos with autoplay options
self.video5 = \
create_videos('Video with autoplay no', self.user1, self.org1, 1, Media.State.FINISHED,
None, None, True, 'n')[0]
add_channel_to_video(self.chan1, self.video5)
self.video6 = \
create_videos('Video with autoplay yes', self.user1, self.org1, 1,
Media.State.FINISHED, None, None, True, 'y')[0]
add_channel_to_video(self.chan1, self.video6)
self.client = Client(HTTP_REFERER='http://qhub-tests.com')
def tearDown(self):
Media.objects.all().delete()
Channel.objects.all().delete()
@classmethod
def tearDownClass(cls):
cls.org1.delete()
# <editor-fold desc="Video vast TESTS">
def test_video_override_channel_vast(self):
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id': self.video3.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate adTagUrl in response
self.assertEquals(self.video3.ads_vast_url, response.context['adTagUrl'])
def test_video_without_vast(self):
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id': self.video1.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate adTagUrl in response
self.assertEquals(self.chan1.ads_vast_url, response.context['adTagUrl'])
# </editor-fold>
# <editor-fold desc="Video no-ads flag TESTS">
def test_video_flag_override_channel_vast(self):
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id': self.video3.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate adTagUrl in response
self.assertEquals(self.video3.ads_vast_url, response.context['adTagUrl'])
def test_video_flag_use_channel_vast(self):
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id': self.video1.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate adTagUrl in response
self.assertEquals(self.chan1.ads_vast_url, response.context['adTagUrl'])
def test_video_flag_false_vast(self):
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id': self.video4.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate adTagUrl in response
self.assertEquals('', response.context['adTagUrl'])
def test_video_flag_false_without_vast(self):
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id': self.video4.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate adTagUrl in response
self.assertEquals('', response.context['adTagUrl'])
# </editor-fold>
# <editor-fold desc="Allowed Domain TESTS">
def test_valid_all_domains(self):
client = Client(HTTP_REFERER='http://www.allowed-domain.com')
add_channel_to_video(self.chan3, self.video3)
url = reverse('embed',
kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
def test_valid_simple_domain(self):
client = Client(HTTP_REFERER='http://www.allowed-domain.com')
add_channel_to_video(self.chan4, self.video3)
url = reverse('embed',
kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
def test_invalid_simple_domain(self):
client = Client(HTTP_REFERER='http://www.not-allowed-domain.com')
add_channel_to_video(self.chan4, self.video3)
url = reverse('embed',
kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
# VALID WILDCARD
def test_valid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wildcard.test.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
def test_second_valid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wild-card.test.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
def test_third_valid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wild_c4rd-test.test.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
def test_valid_double_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wild.card.test.com')
add_channel_to_video(self.chan6, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
def test_valid_common_domain(self):
client = Client(HTTP_REFERER='http://www.domain.com')
add_channel_to_video(self.chan7, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertFalse(response.context['error'])
# INVALID WILDCARD
def test_invalid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wildcard.test.invalid.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
def test_second_invalid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.test.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
def test_third_invalid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.invalid.wildcard.test.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
def test_fourth_invalid_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www..test.com')
add_channel_to_video(self.chan5, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
def test_invalid_double_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wild.test.com')
add_channel_to_video(self.chan6, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
def test_second_invalid_double_wildcard_domain(self):
client = Client(HTTP_REFERER='http://www.wild.test.card.com')
add_channel_to_video(self.chan6, self.video3)
url = reverse('embed', kwargs={'video_id': self.video3.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message'])
# VIDEO AUTOPLAY
def test_channel_autoplay_with_video_autoplay_channel_must_autoplay(self):
"""
The channel as autoplay, the video has autoplay as the channel config.
The video must autoplay
"""
client = Client()
add_channel_to_video(self.chan2, self.video1)
url = reverse('embed', kwargs={'channel_id': self.chan2.channel_id,
'video_id': self.video1.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# We check against the string given that it must be the word 'True' for it to work
# and not something that evaluates to True
self.assertTrue(str(response.context['autoplay']) == 'True')
def test_channel_autoplay_with_video_autoplay_no_must_not_autoplay(self):
"""
The channel as autoplay, the video has autoplay as no.
The video must not autoplay
"""
client = Client()
add_channel_to_video(self.chan2, self.video5)
url = reverse('embed', kwargs={'channel_id': self.chan2.channel_id,
'video_id': self.video5.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate autoplay in response
self.assertTrue(response.context['autoplay'] == '')
def test_channel_autoplay_with_video_autoplay_yes_must_autoplay(self):
"""
The channel as autoplay, the video has autoplay activated.
The video must not autoplay
"""
client = Client()
add_channel_to_video(self.chan2, self.video6)
url = reverse('embed', kwargs={'channel_id': self.chan2.channel_id,
'video_id': self.video6.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# We check against the string given that it must be the word 'True' for it to work
# and not something that evaluates to True
self.assertTrue(str(response.context['autoplay']) == 'True')
def test_channel_no_autoplay_with_video_autoplay_channel_must_not_autoplay(self):
"""
The channel has no autoplay, the video has autoplay as channel.
The video must not autoplay
"""
client = Client()
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id':
self.video1.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate autoplay in response
self.assertTrue(response.context['autoplay'] == '')
def test_channel_no_autoplay_with_video_autoplay_no_must_not_autoplay(self):
"""
The channel has no autoplay, the video has autoplay no autoplay.
The video must not autoplay
"""
client = Client()
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id':
self.video5.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate autoplay in response
self.assertTrue(response.context['autoplay'] == '')
def test_channel_no_autoplay_with_video_autoplay_yes_must_autoplay(self):
"""
The channel has no autoplay, the video has autoplay activated.
The video must autoplay
"""
client = Client()
url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id,
'video_id':
self.video6.video_id})
response = client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# We check against the string given that it must be the word 'True' for it to work
# and not something that evaluates to True
self.assertTrue(str(response.context['autoplay']) == 'True')
# </editor-fold>
# <editor-fold desc="Available Content Test"
def test_error_message_video_state_waiting_file(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.WAITING_FILE, None, None,
False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_queued(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.QUEUED, None, None,
False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_queued_failed(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.QUEUING_FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_processing(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_processing_failed(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING_FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_not_finished(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.NOT_FINISHED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_state_failed(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
def test_error_message_video_with_disabled_org(self):
video = \
create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None,
None, False)[0]
add_channel_to_video(self.chan1, video)
self.org1.upload_enable = False
self.org1.save()
url = reverse('embed',
kwargs={'channel_id': self.chan1.channel_id,
'video_id': video.video_id})
response = self.client.get(url)
# Validate status code
self.assertEquals(status.HTTP_200_OK, response.status_code)
# Validate error in response
self.assertTrue(response.context['error'])
# Validate specific error message
self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message'])
self.org1.upload_enable = True
self.org1.save()
def test_error_message_live_video_state_starting(self):
live = \ | create_live_videos('Live', self.user1, self.org1, 1, LiveVideo.State.STARTING, None, | 8 | 2023-10-17 19:44:32+00:00 | 12k |
Qualcomm-AI-research/geometric-algebra-transformer | gatr/nets/axial_gatr.py | [
{
"identifier": "SelfAttentionConfig",
"path": "gatr/layers/attention/config.py",
"snippet": "class SelfAttentionConfig:\n \"\"\"Configuration for attention.\n\n Parameters\n ----------\n in_mv_channels : int\n Number of input multivector channels.\n out_mv_channels : int\n ... | from dataclasses import replace
from typing import Optional, Tuple, Union
from einops import rearrange
from torch import nn
from torch.utils.checkpoint import checkpoint
from gatr.layers.attention.config import SelfAttentionConfig
from gatr.layers.gatr_block import GATrBlock
from gatr.layers.linear import EquiLinear
from gatr.layers.mlp.config import MLPConfig
import torch | 7,273 | # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.
# Default rearrange patterns
_MV_REARRANGE_PATTERN = "... i j c x -> ... j i c x"
_S_REARRANGE_PATTERN = "... i j c -> ... j i c"
class AxialGATr(nn.Module): # pylint: disable=duplicate-code
"""Axial GATr network for two token dimensions.
This, together with gatr.nets.gatr.GATr, is the main architecture proposed in our paper.
It combines `num_blocks` GATr transformer blocks, each consisting of geometric self-attention
layers, a geometric MLP, residual connections, and normalization layers. In addition, there
are initial and final equivariant linear layers.
Assumes input data with shape `(..., num_items_1, num_items_2, num_channels, 16)`.
The first, third, fifth, ... block computes attention over the `items_2` axis. The other blocks
compute attention over the `items_1` axis. Positional encoding can be specified separately for
both axes.
Parameters
----------
in_mv_channels : int
Number of input multivector channels.
out_mv_channels : int
Number of output multivector channels.
hidden_mv_channels : int
Number of hidden multivector channels.
in_s_channels : None or int
If not None, sets the number of scalar input channels.
out_s_channels : None or int
If not None, sets the number of scalar output channels.
hidden_s_channels : None or int
If not None, sets the number of scalar hidden channels.
attention: Dict
Data for SelfAttentionConfig
mlp: Dict
Data for MLPConfig
num_blocks : int
Number of transformer blocks.
pos_encodings : tuple of bool
Whether to apply rotary positional embeddings along the item dimensions to the scalar keys
and queries. The first element in the tuple determines whether positional embeddings
are applied to the first item dimension, the second element the same for the second item
dimension.
collapse_dims_for_odd_blocks : bool
Whether the batch dimensions will be collapsed in odd blocks (to support xformers block
attention)
"""
def __init__(
self,
in_mv_channels: int,
out_mv_channels: int,
hidden_mv_channels: int,
in_s_channels: Optional[int],
out_s_channels: Optional[int],
hidden_s_channels: Optional[int],
attention: SelfAttentionConfig,
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.
# Default rearrange patterns
_MV_REARRANGE_PATTERN = "... i j c x -> ... j i c x"
_S_REARRANGE_PATTERN = "... i j c -> ... j i c"
class AxialGATr(nn.Module): # pylint: disable=duplicate-code
"""Axial GATr network for two token dimensions.
This, together with gatr.nets.gatr.GATr, is the main architecture proposed in our paper.
It combines `num_blocks` GATr transformer blocks, each consisting of geometric self-attention
layers, a geometric MLP, residual connections, and normalization layers. In addition, there
are initial and final equivariant linear layers.
Assumes input data with shape `(..., num_items_1, num_items_2, num_channels, 16)`.
The first, third, fifth, ... block computes attention over the `items_2` axis. The other blocks
compute attention over the `items_1` axis. Positional encoding can be specified separately for
both axes.
Parameters
----------
in_mv_channels : int
Number of input multivector channels.
out_mv_channels : int
Number of output multivector channels.
hidden_mv_channels : int
Number of hidden multivector channels.
in_s_channels : None or int
If not None, sets the number of scalar input channels.
out_s_channels : None or int
If not None, sets the number of scalar output channels.
hidden_s_channels : None or int
If not None, sets the number of scalar hidden channels.
attention: Dict
Data for SelfAttentionConfig
mlp: Dict
Data for MLPConfig
num_blocks : int
Number of transformer blocks.
pos_encodings : tuple of bool
Whether to apply rotary positional embeddings along the item dimensions to the scalar keys
and queries. The first element in the tuple determines whether positional embeddings
are applied to the first item dimension, the second element the same for the second item
dimension.
collapse_dims_for_odd_blocks : bool
Whether the batch dimensions will be collapsed in odd blocks (to support xformers block
attention)
"""
def __init__(
self,
in_mv_channels: int,
out_mv_channels: int,
hidden_mv_channels: int,
in_s_channels: Optional[int],
out_s_channels: Optional[int],
hidden_s_channels: Optional[int],
attention: SelfAttentionConfig, | mlp: MLPConfig, | 3 | 2023-10-23 15:58:36+00:00 | 12k |
tomguluson92/cloth2tex | phase1_inference.py | [
{
"identifier": "ClothRenderer",
"path": "renderer/cloth_renderer.py",
"snippet": "class ClothRenderer(object):\n \n def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1):\n self.device = torch.device(\"cuda:0\")\n\n self.img_size = resolution\n self.... | import argparse
import datetime
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pickle
import os
import os.path as osp
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import thinplate as tps
import time
import matplotlib.pyplot as plt
import importlib
import random
import json
import cv2
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names
from renderer.cloth_renderer import ClothRenderer
from PIL import Image
from utils.frequency import extract_ampl_phase
from utils.binary_function import Binarize
from utils.tvl_loss import TVLoss, TVMaskLoss
from tqdm import tqdm
from pytorch3d.io import load_obj, save_obj
from itertools import chain
from pytorch3d.structures import Meshes
from pytorch3d.transforms import RotateAxisAngle
from pytorch3d.loss import (
mesh_edge_loss,
mesh_laplacian_smoothing,
mesh_normal_consistency,
)
from lib.deformation_graph import DeformationGraph
from lib.mesh_sampling import generate_transform_matrices_coma
from lib.utils_dg import to_edge_index, to_sparse, get_vert_connectivity, scipy_to_torch_sparse
from models import DeformGraphModel
from torch_geometric.transforms import FaceToEdge
from torch_geometric.data import Data
from psbody.mesh import Mesh
from torch_geometric.io import read_ply | 8,827 | # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer----------------------
self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor)
if os.path.exists(os.path.join("experiments", savedir)):
pass
else:
os.makedirs(os.path.join("experiments", savedir))
self.savedir = savedir
self.uv = torch.ones((1, 512, 512, 3)).cuda()
self.uv.requires_grad = True
self.optimizer = optim.Adam([self.uv], lr=5e-3, betas=(0.5, 0.999))
# define loss
self.criterion = nn.MSELoss() # nn.L1Loss() nn.MSELoss()
self.mse = nn.MSELoss()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# You can choose TVMaskLoss and test if it is suitable for your case.
| # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer----------------------
self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor)
if os.path.exists(os.path.join("experiments", savedir)):
pass
else:
os.makedirs(os.path.join("experiments", savedir))
self.savedir = savedir
self.uv = torch.ones((1, 512, 512, 3)).cuda()
self.uv.requires_grad = True
self.optimizer = optim.Adam([self.uv], lr=5e-3, betas=(0.5, 0.999))
# define loss
self.criterion = nn.MSELoss() # nn.L1Loss() nn.MSELoss()
self.mse = nn.MSELoss()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# You can choose TVMaskLoss and test if it is suitable for your case. | self.tvl_loss = TVLoss(weight=1) # TVMaskLoss(weight=1) or TVLoss(weight=1) | 3 | 2023-10-17 11:30:53+00:00 | 12k |
uukuguy/multi_loras | multi_loras/slora/models/peft/lora_unordered_batch_infer.py | [
{
"identifier": "lora_get_qkvo_fwd_shrink",
"path": "multi_loras/slora/models/peft/triton_kernel/lora/lora_prefill.py",
"snippet": "@torch.inference_mode()\ndef lora_get_qkvo_fwd_shrink(x, w, o, b_loc, b_lora_start, b_lora_ranks, b_start_loc, b_seq_len, b_indicies, hidden_size, qkvo, max_rank, max_input... | import numpy as np
import torch
import torch.nn as nn
from typing import final
from .triton_kernel.lora.lora_prefill import lora_get_qkvo_fwd_shrink, lora_get_qkvo_fwd_expand
from ..llama.triton_kernel.context_flashattention_nopad import context_attention_fwd
from ..llama.triton_kernel.rotary_emb import rotary_emb_fwd
from ...common.infer_utils import init_bloc
from ...router.model_infer.naive_infer_adapter import NaiveInferAdapter
from ...utils.infer_utils import mark_cost_time
from ...utils.infer_utils import calculate_time, mark_start, mark_end
from slora._kernels import dispatch_bgmv | 7,941 | o = self._lora_get_o(layer_id, o, infer_state, no_lora_compute)
# if self.world_size_ > 1:
# dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False)
# residual
input_embs.add_(o.view(-1, layer_infer.embed_dim_))
return
    # @calculate_time(show=True, min_cost_ms=0)
    # this impl dont to use @mark_cost_time
    def _lora_token_attention(self, layer_id, input_embs, infer_state, no_lora_compute=False, no_lora_copy=False):
        """Decode-step attention sublayer for layer ``layer_id`` with per-request
        LoRA deltas applied to q/k/v and o projections.

        Mutates ``input_embs`` in place by adding the attention output
        (residual connection); returns None.
        """
        layer_weight = self.base_model.trans_layers_weight[layer_id]
        layer_infer = self.base_model.layers_infer[layer_id]
        # layer normalization
        input1 = layer_infer._att_norm(input_embs, infer_state, layer_weight)
        # fetch k, v
        cache_k, cache_v = layer_infer._pre_cache_kv(infer_state, layer_weight)
        # gen new q, k, v (batch different adapters)
        q = self._batch_lora_get_qkv(layer_id, input1, cache_k, cache_v, infer_state, no_lora_compute, no_lora_copy)
        input1 = None  # drop the normalized activations as soon as they are consumed
        layer_infer._post_cache_kv(cache_k, cache_v, infer_state, layer_weight)
        # compute attention
        o = layer_infer._token_attention_kernel(q, infer_state, layer_weight)
        q = None
        o = self._batch_lora_get_o(layer_id, o, infer_state, no_lora_compute)
        # if self.world_size_ > 1:
        #     dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False)
        input_embs.add_(o.view(-1, layer_infer.embed_dim_))
        return
    # @calculate_time(show=True, min_cost_ms=0)
    def _batch_lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False, no_lora_copy=False)->torch.Tensor:
        """Compute q/k/v for one decode step: base projection plus batched
        LoRA delta (shrink into ``self.delta[i]``, then expand back), then
        rotary embedding for q and k.

        Writes k/v into ``cache_k``/``cache_v`` (via ``out=``) and returns q.
        dispatch_bgmv's last-but-one int argument selects the projection slot
        (0=q, 1=k, 2=v) — presumably an offset into the packed adapter
        weights; confirm against the kernel.
        """
        base_model = self.base_model
        base_layer_weight = base_model.trans_layers_weight[layer_id]
        base_layer_infer = base_model.layers_infer[layer_id]
        # q (bs, H)
        q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.q_weight_)
        # @TODO: fix me, filter requests querying only base model
        assert(len(q)==len(self.req_bins))
        if not no_lora_compute:
            # mark_start("get_q")
            delta_qA = self.delta[0]
            dispatch_bgmv(delta_qA, input_embs.view(-1, base_layer_infer.embed_dim_),
                          self.key_buffer[layer_id],
                          self.infer_adapter.a_start, self.infer_adapter.a_len,
                          self.infer_adapter.a_loc, self.req_bins, 0, self.infer_adapter.a_scaling)
            dispatch_bgmv(q, delta_qA, self.value_buffer[layer_id], self.infer_adapter.a_start,
                          self.infer_adapter.a_len, self.infer_adapter.a_loc,
                          self.req_bins, 0, self.infer_adapter.a_scaling)
            # delta_qA = None
            # mark_end("get_q")
        rotary_emb_fwd(q.view(-1, base_layer_infer.tp_q_head_num_, base_model.head_dim_),
                       infer_state.position_cos, infer_state.position_sin)
        # k (bs, H)
        torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.k_weight_,
                 out=cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_))
        if not no_lora_compute:
            # mark_start("get_k")
            delta_kA = self.delta[1]
            dispatch_bgmv(delta_kA, input_embs.view(-1, base_layer_infer.embed_dim_),
                          self.key_buffer[layer_id],
                          self.infer_adapter.a_start, self.infer_adapter.a_len,
                          self.infer_adapter.a_loc, self.req_bins, 1, self.infer_adapter.a_scaling)
            dispatch_bgmv(cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_),
                          delta_kA, self.value_buffer[layer_id], self.infer_adapter.a_start,
                          self.infer_adapter.a_len, self.infer_adapter.a_loc,
                          self.req_bins, 1, self.infer_adapter.a_scaling)
            # delta_kA = None
            # mark_end("get_k")
        rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin)
        # v (bs, H)
        torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.v_weight_,
                 out=cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_))
        if not no_lora_compute:
            # mark_start("get_v")
            delta_vA = self.delta[2]
            dispatch_bgmv(delta_vA, input_embs.view(-1, base_layer_infer.embed_dim_),
                          self.key_buffer[layer_id],
                          self.infer_adapter.a_start, self.infer_adapter.a_len,
                          self.infer_adapter.a_loc, self.req_bins, 2, self.infer_adapter.a_scaling)
            dispatch_bgmv(cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_),
                          delta_vA, self.value_buffer[layer_id], self.infer_adapter.a_start,
                          self.infer_adapter.a_len, self.infer_adapter.a_loc,
                          self.req_bins, 2, self.infer_adapter.a_scaling)
            # delta_vA = None
            # mark_end("get_v")
        return q
def _lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False)->torch.Tensor:
base_model = self.base_model
base_layer_weight = base_model.trans_layers_weight[layer_id]
base_layer_infer = base_model.layers_infer[layer_id]
# q (S, H)
q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_),
base_layer_weight.q_weight_)
assert(len(q)==len(self.batch_req_bins))
# q = q_base + input * A * B * scaling
# input: (S, H) A: (H, R) B: (R, H)
if not no_lora_compute:
# fix me: @TODO we need to filter out requests querying only base model
delta_qA = self.delta[0]
if self.max_b_seq_len >= 200 and self.max_lora_dim >= 64 and len(infer_state.b_seq_len) >= 2:
# if 1 == 0:
lora_get_qkvo_fwd_shrink(input_embs.view(-1, base_layer_infer.embed_dim_),
self.key_buffer[layer_id].view(-1, self.kv_embed_dim),
delta_qA, self.infer_adapter.a_loc, self.infer_adapter.a_start,
self.infer_adapter.a_len, infer_state.b_start_loc,
infer_state.b_seq_len, self.req_bins, base_layer_infer.embed_dim_,
0, self.max_lora_dim, self.max_b_seq_len)
|
class LoraUnorderedBatchInfer:
    def __init__(self, base_model, adapters, infer_adapter=None):
        """Bind a batch of per-request LoRA adapters to a shared base model.

        base_model: the shared transformer used by every request.
        adapters: one adapter object per request; ``None`` entries count as
            rank 0 (base-model-only).
        infer_adapter: holds the packed LoRA weights; either a
            NaiveInferAdapter with its own buffers, or an adapter whose
            weights live in the shared memory-manager KV buffers.
        """
        self.base_model = base_model
        # Per-request adapter rank; used only to size the shrink scratch buffers.
        lora_layer_dim = [adapter.r if adapter is not None else 0 for adapter in adapters]
        self.max_lora_dim = max(lora_layer_dim)
        # req_bins[i] = index of request i's adapter inside infer_adapter's packed buffers.
        self.req_bins = torch.zeros(len(adapters), dtype=torch.long, device="cuda")
        if infer_adapter is not None:
            self.infer_adapter = infer_adapter
            if isinstance(infer_adapter, NaiveInferAdapter):
                # Naive adapter keeps dedicated LoRA weight buffers.
                self.key_buffer = infer_adapter.key_buffer
                self.value_buffer = infer_adapter.value_buffer
            else:
                # Otherwise the LoRA weights are stored in the shared memory pool.
                self.key_buffer = infer_adapter.mem_manager.key_buffer
                self.value_buffer = infer_adapter.mem_manager.value_buffer
            for i, adapter in enumerate(adapters):
                # FIX ME @TODO: currently not supporting adapter is None
                if adapter is None: continue
                idx = infer_adapter.adapter_dirs.index(adapter.lora_dir)
                self.req_bins[i] = idx
        self.kv_embed_dim = base_model.tp_k_head_num_ * base_model.head_dim_
    @torch.no_grad()
    def forward(
            self,
            batch_size, # number of request
            total_token_num,
            max_len_in_batch,
            input_ids, # 1D input tensor
            b_loc, # mapping to memory pool
            b_start_loc, # the start index of each request
            b_seq_len, # the current length of each request
            is_prefill=True,
            use_bmm=True,
            no_lora_compute=False,
            no_lora_copy=False):
        """Run one forward pass (prefill or decode) over the whole batch.

        Pre-allocates the three fp16 scratch buffers in ``self.delta`` used by
        the LoRA shrink step for q/k/v: sized per *token* during prefill and
        per *request* during decode.  ``use_bmm`` is accepted but unused here.
        Returns the prediction logits.
        """
        # Notice that batch_lora only support decoding
        assert len(b_loc) == len(b_start_loc) == len(b_seq_len)
        self.delta = []
        self.max_b_seq_len = torch.max(b_seq_len).item()
        if is_prefill:
            assert(len(self.req_bins)==len(b_seq_len))
            # Expand per-request adapter ids to one id per input token.
            self.batch_req_bins = torch.repeat_interleave(self.req_bins, b_seq_len)
            # self.b_start_loc = torch.cumsum(torch.cat([torch.tensor([0], dtype=torch.long, device="cuda"), b_seq_len[:-1]]), dim=0)
            for _ in range(3):
                self.delta.append(torch.zeros((len(self.batch_req_bins), self.max_lora_dim), dtype=torch.float16, device="cuda"))
            return self._prefill(batch_size, total_token_num, max_len_in_batch,
                                 input_ids,
                                 b_loc, b_start_loc, b_seq_len, no_lora_compute)
        else:
            for _ in range(3):
                self.delta.append(torch.zeros((len(b_seq_len), self.max_lora_dim), dtype=torch.float16, device="cuda"))
            return self._decode(batch_size, total_token_num, max_len_in_batch,
                                input_ids,
                                b_loc, b_start_loc, b_seq_len,
                                no_lora_compute, no_lora_copy)
    def _prefill(self, batch_size, total_token_num, max_len_in_batch,
                 input_ids,
                 b_loc, b_start_loc, b_seq_len, no_lora_compute=False):
        """Prepare the inference state for a prefill pass and run it.

        Builds per-token rotary position cos/sin tables, allocates KV-cache
        slots for every prompt token, and delegates to ``_context_forward``.
        Returns the prediction logits.
        """
        infer_state = self.base_model.infer_state_class()
        infer_state.is_prefill = True
        infer_state.batch_size = batch_size
        infer_state.total_token_num = total_token_num
        infer_state.max_len_in_batch = max_len_in_batch
        assert (input_ids.shape[0] == total_token_num)
        assert (b_loc.shape[0] == b_start_loc.shape[0] == b_seq_len.shape[0])
        # Position ids are 0..len-1 for each request, concatenated in batch order.
        b_seq_len_numpy = b_seq_len.cpu().numpy()
        position_ids = torch.from_numpy(np.concatenate([np.arange(0, b_seq_len_numpy[i])
                                                        for i in range(len(b_seq_len_numpy))], axis=0)).cuda()
        infer_state.position_cos = torch.index_select(
            self.base_model._cos_cached, 0, position_ids).view(position_ids.shape[0], -1)
        infer_state.position_sin = torch.index_select(
            self.base_model._sin_cached, 0, position_ids).view(position_ids.shape[0], -1)
        position_ids = None  # no longer needed; free the index tensor
        infer_state.b_loc = b_loc
        infer_state.b_start_loc = b_start_loc
        infer_state.b_seq_len = b_seq_len
        infer_state.mem_manager = self.base_model.mem_manager
        # One KV-cache slot per prompt token, plus temporary fp16 staging buffers.
        infer_state.prefill_mem_index = self.base_model.mem_manager.alloc(infer_state.total_token_num)
        infer_state.prefill_key_buffer = torch.empty(
            (infer_state.total_token_num, self.base_model.tp_k_head_num_, self.base_model.head_dim_),
            dtype=torch.float16, device="cuda")
        infer_state.prefill_value_buffer = torch.empty(
            (infer_state.total_token_num, self.base_model.tp_k_head_num_, self.base_model.head_dim_),
            dtype=torch.float16, device="cuda")
        init_bloc(b_loc, b_seq_len, max_len_in_batch, infer_state.prefill_mem_index)
        predict_logics = self._context_forward(input_ids, infer_state, no_lora_compute)
        return predict_logics
    def _decode(self, batch_size, total_token_num, max_len_in_batch,
                input_ids,
                b_loc, b_start_loc, b_seq_len, no_lora_compute=False, no_lora_copy=False):
        """Prepare the inference state for one decode step and run it.

        Allocates one new KV-cache slot per request — contiguous when the
        memory manager can provide a contiguous range, otherwise scattered
        slots plus staging buffers — then delegates to ``_token_forward``.
        Returns the prediction logits.
        """
        infer_state = self.base_model.infer_state_class()
        infer_state.is_prefill = False
        infer_state.batch_size = batch_size
        infer_state.total_token_num = total_token_num
        infer_state.max_len_in_batch = max_len_in_batch
        assert (b_loc.shape[0] == b_start_loc.shape[0] == b_seq_len.shape[0])
        infer_state.b_loc = b_loc
        infer_state.b_start_loc = b_start_loc
        infer_state.b_seq_len = b_seq_len
        infer_state.mem_manager = self.base_model.mem_manager
        alloc_mem = self.base_model.mem_manager.alloc_contiguous(batch_size)
        if alloc_mem is not None:
            # Fast path: new tokens can be written directly into one contiguous range.
            infer_state.decode_is_contiguous = True
            infer_state.decode_mem_index = alloc_mem[0]
            infer_state.decode_mem_start = alloc_mem[1]
            infer_state.decode_mem_end = alloc_mem[2]
            b_loc[:, max_len_in_batch - 1] = infer_state.decode_mem_index
        else:
            # Scattered slots: stage new k/v in temporary buffers before copy-out.
            infer_state.decode_is_contiguous = False
            alloc_mem = self.base_model.mem_manager.alloc(batch_size)
            infer_state.decode_mem_index = alloc_mem
            infer_state.decode_key_buffer = torch.empty(
                (batch_size, self.base_model.tp_k_head_num_, self.base_model.head_dim_),
                dtype=torch.float16, device="cuda")
            infer_state.decode_value_buffer = torch.empty(
                (batch_size, self.base_model.tp_k_head_num_, self.base_model.head_dim_),
                dtype=torch.float16, device="cuda")
            b_loc[:, max_len_in_batch - 1] = infer_state.decode_mem_index
        infer_state.init_some_extra_state(self.base_model, batch_size, total_token_num, max_len_in_batch,
                                          input_ids, b_loc, b_start_loc, b_seq_len, False)
        predict_logics = self._token_forward(input_ids, infer_state, no_lora_compute, no_lora_copy)
        return predict_logics
@final
def _context_forward(self, input_ids, infer_state, no_lora_compute=False):
cuda_input_ids = input_ids
input_embs = self.base_model.pre_infer.context_forward(
cuda_input_ids, infer_state, self.base_model.pre_post_weight)
for i in range(self.base_model.layers_num):
input_embs = self._lora_context_forward(i, input_embs, infer_state, no_lora_compute)
predict_logics = self.base_model.post_infer.token_forward(
input_embs, infer_state, self.base_model.pre_post_weight, return_logics=True)
return predict_logics
@final
def _token_forward(self, input_ids, infer_state, no_lora_compute=False, no_lora_copy=False):
cuda_input_ids = input_ids
input_embs = self.base_model.pre_infer.token_forward(
cuda_input_ids, infer_state, self.base_model.pre_post_weight)
for i in range(self.base_model.layers_num):
input_embs = self._lora_token_forward(i, input_embs, infer_state, no_lora_compute, no_lora_copy)
predict_logics = self.base_model.post_infer.token_forward(
input_embs, infer_state, self.base_model.pre_post_weight, return_logics=True)
return predict_logics
@final
def _lora_context_forward(self, layer_id, input_embs, infer_state, no_lora_compute=False):
self._lora_context_attention(layer_id, input_embs, infer_state, no_lora_compute)
layer_weight = self.base_model.trans_layers_weight[layer_id]
layer_infer = self.base_model.layers_infer[layer_id]
layer_infer._context_ffn(input_embs, infer_state, layer_weight)
return input_embs
@final
# @calculate_time(show=True, min_cost_ms=0)
def _lora_token_forward(self, layer_id, input_embs, infer_state, no_lora_compute=False, no_lora_copy=False):
self._lora_token_attention(layer_id, input_embs, infer_state, no_lora_compute, no_lora_copy)
layer_weight = self.base_model.trans_layers_weight[layer_id]
layer_infer = self.base_model.layers_infer[layer_id]
# mark_start("token_ffn")
layer_infer._token_ffn(input_embs, infer_state, layer_weight)
# mark_end("token_ffn")
return input_embs
    # @mark_cost_time("trans context flash forward time cost")  # dont to remove this, will make performence down, did not know why
    def _lora_context_attention(self, layer_id, input_embs, infer_state, no_lora_compute=False):
        """Prefill attention sublayer for layer ``layer_id`` with per-token
        LoRA deltas applied to q/k/v and o projections.

        Mutates ``input_embs`` in place by adding the attention output
        (residual connection); returns None.
        """
        layer_weight = self.base_model.trans_layers_weight[layer_id]
        layer_infer = self.base_model.layers_infer[layer_id]
        # layer normalization
        input1 = layer_infer._att_norm(input_embs, infer_state, layer_weight)
        # fetch k, v
        cache_k, cache_v = layer_infer._pre_cache_kv(infer_state, layer_weight)
        # gen new q, k, v (batch different adapters)
        q = self._lora_get_qkv(layer_id, input1, cache_k, cache_v, infer_state, no_lora_compute)
        input1 = None  # drop the normalized activations as soon as they are consumed
        layer_infer._post_cache_kv(cache_k, cache_v, infer_state, layer_weight)
        # compute attention
        o = layer_infer._context_attention_kernel(q, cache_k, cache_v, infer_state, layer_weight)
        q = None
        o = self._lora_get_o(layer_id, o, infer_state, no_lora_compute)
        # if self.world_size_ > 1:
        #     dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False)
        # residual
        input_embs.add_(o.view(-1, layer_infer.embed_dim_))
        return
    # @calculate_time(show=True, min_cost_ms=0)
    # this impl dont to use @mark_cost_time
    def _lora_token_attention(self, layer_id, input_embs, infer_state, no_lora_compute=False, no_lora_copy=False):
        """Decode-step attention sublayer for layer ``layer_id`` with per-request
        LoRA deltas applied to q/k/v and o projections.

        Mutates ``input_embs`` in place by adding the attention output
        (residual connection); returns None.
        """
        layer_weight = self.base_model.trans_layers_weight[layer_id]
        layer_infer = self.base_model.layers_infer[layer_id]
        # layer normalization
        input1 = layer_infer._att_norm(input_embs, infer_state, layer_weight)
        # fetch k, v
        cache_k, cache_v = layer_infer._pre_cache_kv(infer_state, layer_weight)
        # gen new q, k, v (batch different adapters)
        q = self._batch_lora_get_qkv(layer_id, input1, cache_k, cache_v, infer_state, no_lora_compute, no_lora_copy)
        input1 = None  # drop the normalized activations as soon as they are consumed
        layer_infer._post_cache_kv(cache_k, cache_v, infer_state, layer_weight)
        # compute attention
        o = layer_infer._token_attention_kernel(q, infer_state, layer_weight)
        q = None
        o = self._batch_lora_get_o(layer_id, o, infer_state, no_lora_compute)
        # if self.world_size_ > 1:
        #     dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False)
        input_embs.add_(o.view(-1, layer_infer.embed_dim_))
        return
    # @calculate_time(show=True, min_cost_ms=0)
    def _batch_lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False, no_lora_copy=False)->torch.Tensor:
        """Compute q/k/v for one decode step: base projection plus batched
        LoRA delta (shrink into ``self.delta[i]``, then expand back), then
        rotary embedding for q and k.

        Writes k/v into ``cache_k``/``cache_v`` (via ``out=``) and returns q.
        dispatch_bgmv's last-but-one int argument selects the projection slot
        (0=q, 1=k, 2=v) — presumably an offset into the packed adapter
        weights; confirm against the kernel.
        """
        base_model = self.base_model
        base_layer_weight = base_model.trans_layers_weight[layer_id]
        base_layer_infer = base_model.layers_infer[layer_id]
        # q (bs, H)
        q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.q_weight_)
        # @TODO: fix me, filter requests querying only base model
        assert(len(q)==len(self.req_bins))
        if not no_lora_compute:
            # mark_start("get_q")
            delta_qA = self.delta[0]
            dispatch_bgmv(delta_qA, input_embs.view(-1, base_layer_infer.embed_dim_),
                          self.key_buffer[layer_id],
                          self.infer_adapter.a_start, self.infer_adapter.a_len,
                          self.infer_adapter.a_loc, self.req_bins, 0, self.infer_adapter.a_scaling)
            dispatch_bgmv(q, delta_qA, self.value_buffer[layer_id], self.infer_adapter.a_start,
                          self.infer_adapter.a_len, self.infer_adapter.a_loc,
                          self.req_bins, 0, self.infer_adapter.a_scaling)
            # delta_qA = None
            # mark_end("get_q")
        rotary_emb_fwd(q.view(-1, base_layer_infer.tp_q_head_num_, base_model.head_dim_),
                       infer_state.position_cos, infer_state.position_sin)
        # k (bs, H)
        torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.k_weight_,
                 out=cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_))
        if not no_lora_compute:
            # mark_start("get_k")
            delta_kA = self.delta[1]
            dispatch_bgmv(delta_kA, input_embs.view(-1, base_layer_infer.embed_dim_),
                          self.key_buffer[layer_id],
                          self.infer_adapter.a_start, self.infer_adapter.a_len,
                          self.infer_adapter.a_loc, self.req_bins, 1, self.infer_adapter.a_scaling)
            dispatch_bgmv(cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_),
                          delta_kA, self.value_buffer[layer_id], self.infer_adapter.a_start,
                          self.infer_adapter.a_len, self.infer_adapter.a_loc,
                          self.req_bins, 1, self.infer_adapter.a_scaling)
            # delta_kA = None
            # mark_end("get_k")
        rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin)
        # v (bs, H)
        torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.v_weight_,
                 out=cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_))
        if not no_lora_compute:
            # mark_start("get_v")
            delta_vA = self.delta[2]
            dispatch_bgmv(delta_vA, input_embs.view(-1, base_layer_infer.embed_dim_),
                          self.key_buffer[layer_id],
                          self.infer_adapter.a_start, self.infer_adapter.a_len,
                          self.infer_adapter.a_loc, self.req_bins, 2, self.infer_adapter.a_scaling)
            dispatch_bgmv(cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_),
                          delta_vA, self.value_buffer[layer_id], self.infer_adapter.a_start,
                          self.infer_adapter.a_len, self.infer_adapter.a_loc,
                          self.req_bins, 2, self.infer_adapter.a_scaling)
            # delta_vA = None
            # mark_end("get_v")
        return q
def _lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False)->torch.Tensor:
base_model = self.base_model
base_layer_weight = base_model.trans_layers_weight[layer_id]
base_layer_infer = base_model.layers_infer[layer_id]
# q (S, H)
q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_),
base_layer_weight.q_weight_)
assert(len(q)==len(self.batch_req_bins))
# q = q_base + input * A * B * scaling
# input: (S, H) A: (H, R) B: (R, H)
if not no_lora_compute:
# fix me: @TODO we need to filter out requests querying only base model
delta_qA = self.delta[0]
if self.max_b_seq_len >= 200 and self.max_lora_dim >= 64 and len(infer_state.b_seq_len) >= 2:
# if 1 == 0:
lora_get_qkvo_fwd_shrink(input_embs.view(-1, base_layer_infer.embed_dim_),
self.key_buffer[layer_id].view(-1, self.kv_embed_dim),
delta_qA, self.infer_adapter.a_loc, self.infer_adapter.a_start,
self.infer_adapter.a_len, infer_state.b_start_loc,
infer_state.b_seq_len, self.req_bins, base_layer_infer.embed_dim_,
0, self.max_lora_dim, self.max_b_seq_len) | lora_get_qkvo_fwd_expand(delta_qA, self.value_buffer[layer_id].view(-1, self.kv_embed_dim), | 1 | 2023-10-16 02:39:47+00:00 | 12k |
MobileLLM/AutoDroid | droidbot/input_policy.py | [
{
"identifier": "UTG",
"path": "droidbot/utg.py",
"snippet": "class UTG(object):\n \"\"\"\n UI transition graph\n \"\"\"\n\n def __init__(self, device, app, random_input):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.device = device\n self.app = app\... | import sys
import json
import re
import logging
import random
import yaml
import copy
import requests
import ast
import time
import tools
import pdb
import os
import traceback
import time
import time
import os
import time
import numpy as np
from abc import abstractmethod
from .input_event import *
from .utg import UTG
from .input_event import ScrollEvent
from query_lmql import prompt_llm_with_history
from xmlrpc.client import ServerProxy
from xmlrpclib import ServerProxy
from InstructorEmbedding import INSTRUCTOR
from sklearn.metrics.pairwise import cosine_similarity | 8,152 | yaml.dump(data, f)
    def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):
        """Build the (history, state) pair fed to the LMQL query.

        Returns a tuple ``(history_text, state_prompt)``: previous UI actions
        joined by newlines (each optionally suffixed with its reasoning when
        ``use_thoughts`` is set), plus the possibly memory-augmented state
        prompt.  ``is_text`` and ``view_text`` are accepted but unused here.
        """
        if self.use_memory:
            # if isinstance(state_str, list):
            #     if len(state_str) == 1:
            #         state_str = state_str[0]
            #     else:
            #         state_str = self.memory.hash_state(state_prompt)
            # new_state_prompt = self.f(action_history, state_prompt, state_str)
            # if new_state_prompt !z= None and new_state_prompt != 'no_description':
            #     state_prompt = new_state_prompt
            # Inject the known element function from a similar task into the
            # element that corresponds to the current step, if any.
            if len(action_history) <= len(self.similar_ele_path):
                current_ui_id = len(action_history) - 1
                new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)
                if new_state_prompt != state_prompt: # current state contains an element of insight
                    self.state_ele_memory[state_str] = new_state_prompt
                    state_prompt = new_state_prompt
            # elif state_str in self.state_ele_memory.keys():
            #     state_prompt = self.state_ele_memory[state_str]
        if use_thoughts:
            history_with_thought = []
            for idx in range(len(action_history)):
                history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])
        else:
            history_with_thought = action_history
        return '\n'.join(history_with_thought),state_prompt
    def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):
        """Assemble the full single-string LLM prompt for choosing the next UI action.

        The prompt concatenates: role introduction, task, previous UI actions
        (each optionally suffixed with its reasoning when ``use_thoughts`` is
        set), the current UI state, and the answer-format instructions.
        ``is_text`` and ``view_text`` are accepted but unused here.
        """
        if self.use_memory:
            # if isinstance(state_str, list):
            #     if len(state_str) == 1:
            #         state_str = state_str[0]
            #     else:
            #         state_str = self.memory.hash_state(state_prompt)
            # new_state_prompt = self.f(action_history, state_prompt, state_str)
            # if new_state_prompt !z= None and new_state_prompt != 'no_description':
            #     state_prompt = new_state_prompt
            # Inject the known element function from a similar task into the
            # element that corresponds to the current step, if any.
            if len(action_history) <= len(self.similar_ele_path):
                current_ui_id = len(action_history) - 1
                new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)
                if new_state_prompt != state_prompt: # current state contains an element of insight
                    self.state_ele_memory[state_str] = new_state_prompt
                    state_prompt = new_state_prompt
            # elif state_str in self.state_ele_memory.keys():
            #     state_prompt = self.state_ele_memory[state_str]
        if use_thoughts:
            history_with_thought = []
            for idx in range(len(action_history)):
                history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])
        else:
            history_with_thought = action_history
        introduction = '''You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.'''
        task_prompt = 'Task: ' + self.task
        history_prompt = 'Previous UI actions: \n' + '\n'.join(history_with_thought)
        full_state_prompt = 'Current UI state: \n' + state_prompt
        request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>'''
        prompt = introduction + '\n' + task_prompt + '\n' + history_prompt + '\n' + full_state_prompt + '\n' + request_prompt
        return prompt
def _extract_input_text(self, string, start='Text: ', end=' Thought'):
start_index = string.find(start) + len(start) # Find the location of 'start'
if start_index == -1:
start_index = 0
end_index = string.find(end) # Find the location of 'end'
substring = string[start_index:end_index] if end_index != -1 else string[start_index:]
return substring
def _extract_input_textv2(self, string):
if string[:11] == 'InputText: ':
return string[11:]
else:
return string
def _get_text_view_description(self, view):
content_description = safe_dict_get(view, 'content_description', default='')
view_text = safe_dict_get(view, 'text', default='')
view_desc = f"<input class='&'>#</input>"#.replace('&', view_class)#.replace('#', text)
if view_text:
view_desc = view_desc.replace('#', view_text)
else:
view_desc = view_desc.replace('#', '')
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
return view_desc
def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None):
'''
get action choice from LLM based on a list of views and corresponding actions
'''
if current_state:
state_prompt, candidate_actions, _, _ = current_state.get_described_actions()
state_str = current_state.state_str
if USE_LMQL:
history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,
thought_history=thought_history)
else:
prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)
else:
views_with_id = []
for id in range(len(views)):
views_with_id.append(tools.insert_id_into_view(views[id], id))
state_prompt = '\n'.join(views_with_id)
state_str = tools.hash_string(state_prompt)
if USE_LMQL:
history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,
thought_history=thought_history)
else:
prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)
# ids = [str(idx) for idx, i in enumerate(candidate_actions)]
ids = str([i for i in range(len(candidate_actions))])
if USE_LMQL:
| # from memory.memory_builder import Memory
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # silence tokenizers fork-parallelism warnings
# Max number of restarts
MAX_NUM_RESTARTS = 5
# Max number of steps outside the app
MAX_NUM_STEPS_OUTSIDE = 1000
# After this many steps outside the app, kill it instead of pressing BACK
MAX_NUM_STEPS_OUTSIDE_KILL = 1000
# Max number of replay tries
MAX_REPLY_TRIES = 5
# Some input event flags (appended to an event-trace string to track recent actions)
EVENT_FLAG_STARTED = "+started"
EVENT_FLAG_START_APP = "+start_app"
EVENT_FLAG_STOP_APP = "+stop_app"
EVENT_FLAG_EXPLORE = "+explore"
EVENT_FLAG_NAVIGATE = "+navigate"
EVENT_FLAG_TOUCH = "+touch"
# Policy taxonomy: identifiers used to select which input policy drives exploration
POLICY_NAIVE_DFS = "dfs_naive"
POLICY_GREEDY_DFS = "dfs_greedy"
POLICY_NAIVE_BFS = "bfs_naive"
POLICY_GREEDY_BFS = "bfs_greedy"
POLICY_REPLAY = "replay"
POLICY_MANUAL = "manual"
POLICY_MONKEY = "monkey"
POLICY_TASK = "task"
POLICY_NONE = "none"
POLICY_MEMORY_GUIDED = "memory_guided"  # implemented in input_policy2
# Sentinel value returned by a policy to tell InputPolicy.start the task is done
FINISHED = "task_completed"
# Maximum number of scroll events sent in one direction when exploring a scroller
MAX_SCROLL_NUM = 7
# Whether prompts are issued through LMQL instead of a plain-text prompt
USE_LMQL = False
class InputInterruptedException(Exception):
    """Raised by a policy to make InputPolicy.start stop sending events."""
    pass
def safe_dict_get(view_dict, key, default=None):
    """Look up *key* in *view_dict*, never returning None.

    Missing keys map to *default*; if the looked-up value (or the default)
    is None it is normalized to '' so downstream string operations are safe.
    """
    value = view_dict.get(key, default)
    # 'is None' (identity) replaces the old '== None' equality test.
    return '' if value is None else value
class InputPolicy(object):
    """
    This class is responsible for generating events to stimulate more app behaviour
    It should call AppEventManager.send_event method continuously
    """
    def __init__(self, device, app):
        # device: the Device under test; app: the App being exercised
        self.logger = logging.getLogger(self.__class__.__name__)
        self.device = device
        self.app = app
        self.action_count = 0  # number of events generated so far in this run
        self.master = None
    def start(self, input_manager):
        """
        start producing events
        :param input_manager: instance of InputManager

        Loops until the manager is disabled, the configured event count is
        reached, or a policy raises InputInterruptedException / returns the
        FINISHED sentinel.
        """
        self.action_count = 0
        while input_manager.enabled and self.action_count < input_manager.event_count:
            try:
                # # make sure the first event is go to HOME screen
                # # the second event is to start the app
                # if self.action_count == 0 and self.master is None:
                #     event = KeyEvent(name="HOME")
                # elif self.action_count == 1 and self.master is None:
                #     event = IntentEvent(self.app.get_start_intent())
                # The very first event kills the app so exploration starts
                # from a clean state.
                if self.action_count == 0 and self.master is None:
                    event = KillAppEvent(app=self.app)
                else:
                    event = self.generate_event(input_manager)
                # FINISHED is a sentinel string, not an event: stop the loop.
                if event == FINISHED:
                    break
                input_manager.add_event(event)
            except KeyboardInterrupt:
                break
            except InputInterruptedException as e:
                self.logger.warning("stop sending events: %s" % e)
                break
            # except RuntimeError as e:
            #     self.logger.warning(e.message)
            #     break
            except Exception as e:
                # Any other failure is logged and the loop retries with a new
                # event; note the action counter is NOT advanced in this case.
                self.logger.warning("exception during sending events: %s" % e)
                traceback.print_exc()
                continue
            self.action_count += 1
    @abstractmethod
    def generate_event(self, input_manager):
        """
        generate an event
        :param input_manager: instance of InputManager
        @return: an InputEvent to send, or the FINISHED sentinel
        """
        pass
class NoneInputPolicy(InputPolicy):
    """
    do not send any event
    """

    def __init__(self, device, app):
        super(NoneInputPolicy, self).__init__(device, app)

    def generate_event(self, input_manager=None):
        """
        generate an event
        :param input_manager: instance of InputManager (unused; accepted with a
            default so InputPolicy.start's ``generate_event(input_manager)``
            call no longer raises TypeError)
        @return: always None -- this policy never sends events
        """
        return None
class UtgBasedInputPolicy(InputPolicy):
    """
    state-based input policy: tracks a UI Transition Graph (UTG) of the app
    and delegates event choice to generate_event_based_on_utg.
    """

    def __init__(self, device, app, random_input):
        super(UtgBasedInputPolicy, self).__init__(device, app)
        self.random_input = random_input
        self.script = None            # optional scripted operations
        self.master = None
        self.script_events = []       # queue of pending scripted events
        self.last_event = None
        self.last_state = None
        self.current_state = None
        self.utg = UTG(device=device, app=app, random_input=random_input)
        self.script_event_idx = 0
        if self.device.humanoid is not None:
            # rolling windows of recent view trees / events for the humanoid model
            self.humanoid_view_trees = []
            self.humanoid_events = []

    def generate_event(self, input_manager):
        """
        generate an event
        :param input_manager: instance of InputManager
        @return: InputEvent (or the FINISHED sentinel propagated from a subclass)
        """
        # Get current device state
        self.current_state = self.device.get_current_state()
        if self.current_state is None:
            time.sleep(5)
            return KeyEvent(name="BACK")
        self.__update_utg()
        # update last view trees for humanoid (keep at most the last 4)
        if self.device.humanoid is not None:
            self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree]
            if len(self.humanoid_view_trees) > 4:
                self.humanoid_view_trees = self.humanoid_view_trees[1:]
        event = None
        # Bug fix: old_state must be initialized here -- previously it was only
        # assigned on the generate_event_based_on_utg path, so a scripted event
        # caused a NameError at the last_state update below.
        old_state = None
        # if the previous operation is not finished, continue
        if len(self.script_events) > self.script_event_idx:
            event = self.script_events[self.script_event_idx].get_transformed_event(self)
            self.script_event_idx += 1
        # First try matching a state defined in the script
        if event is None and self.script is not None:
            operation = self.script.get_operation_based_on_state(self.current_state)
            if operation is not None:
                self.script_events = operation.events
                # restart script
                event = self.script_events[0].get_transformed_event(self)
                self.script_event_idx = 1
        if event is None:
            result = self.generate_event_based_on_utg(input_manager)
            # Robustness fix: TaskPolicy returns an (old_state, event) tuple,
            # while the naive/greedy/manual policies return a bare event;
            # accept both shapes instead of always tuple-unpacking.
            if isinstance(result, tuple):
                old_state, event = result
            else:
                event = result
            time.sleep(3)
        # update last events for humanoid (keep at most the last 3)
        if self.device.humanoid is not None:
            self.humanoid_events = self.humanoid_events + [event]
            if len(self.humanoid_events) > 3:
                self.humanoid_events = self.humanoid_events[1:]
        self.last_state = self.current_state if old_state is None else old_state
        self.last_event = event
        return event

    def __update_utg(self):
        # Record the transition (last_state --last_event--> current_state).
        self.utg.add_transition(self.last_event, self.last_state, self.current_state)

    @abstractmethod
    def generate_event_based_on_utg(self, input_manager):
        """
        generate an event based on UTG
        :param input_manager: instance of InputManager
        :return: InputEvent, or an (old_state, InputEvent) tuple
        """
        pass
class UtgNaiveSearchPolicy(UtgBasedInputPolicy):
    """
    depth-first strategy to explore UFG (old)
    """

    def __init__(self, device, app, random_input, search_method):
        super(UtgNaiveSearchPolicy, self).__init__(device, app, random_input)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.explored_views = set()        # (activity, view_str) pairs already touched
        self.state_transitions = set()     # (event_str, old_tag, new_tag) triples
        self.search_method = search_method
        self.last_event_flag = ""
        self.last_event_str = None
        self.last_state = None
        # buttons that are preferred when choosing the next view to touch
        self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access",
                                  "allow", "check", "agree", "try", "go", "next"]

    def generate_event_based_on_utg(self, input_manager=None):
        """
        generate an event based on current device state
        note: ensure these fields are properly maintained in each transaction:
          last_event_flag, last_touched_view, last_state, exploited_views, state_transitions
        :param input_manager: instance of InputManager (unused here; accepted
            with a default because UtgBasedInputPolicy.generate_event passes it,
            which previously raised TypeError)
        @return: InputEvent
        """
        self.save_state_transition(self.last_event_str, self.last_state, self.current_state)
        if self.device.is_foreground(self.app):
            # the app is in foreground, clear last_event_flag
            self.last_event_flag = EVENT_FLAG_STARTED
        else:
            number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP)
            # If we have tried too many times but the app is still not started, stop DroidBot
            if number_of_starts > MAX_NUM_RESTARTS:
                raise InputInterruptedException("The app cannot be started.")
            # if app is not started, try start it
            if self.last_event_flag.endswith(EVENT_FLAG_START_APP):
                # It seems the app stuck at some state, and cannot be started
                # just pass to let viewclient deal with this case
                self.logger.info("The app had been restarted %d times.", number_of_starts)
                self.logger.info("Trying to restart app...")
                pass
            else:
                start_app_intent = self.app.get_start_intent()
                self.last_event_flag += EVENT_FLAG_START_APP
                self.last_event_str = EVENT_FLAG_START_APP
                return IntentEvent(start_app_intent)
        # select a view to click
        view_to_touch = self.select_a_view(self.current_state)
        # if no view can be selected, restart the app
        if view_to_touch is None:
            stop_app_intent = self.app.get_stop_intent()
            self.last_event_flag += EVENT_FLAG_STOP_APP
            self.last_event_str = EVENT_FLAG_STOP_APP
            return IntentEvent(stop_app_intent)
        view_to_touch_str = view_to_touch['view_str']
        if view_to_touch_str.startswith('BACK'):
            result = KeyEvent('BACK')
        else:
            result = TouchEvent(view=view_to_touch)
        self.last_event_flag += EVENT_FLAG_TOUCH
        self.last_event_str = view_to_touch_str
        self.save_explored_view(self.current_state, self.last_event_str)
        return result

    def select_a_view(self, state):
        """
        select a view in the view list of given state, let droidbot touch it
        @param state: DeviceState
        @return: a view dict, or None if nothing can be selected
        """
        views = []
        for view in state.views:
            # only leaf, enabled views are candidates
            if view['enabled'] and len(view['children']) == 0:
                views.append(view)
        if self.random_input:
            random.shuffle(views)
        # add a "BACK" view, consider go back first/last according to search policy
        mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity,
                          'text': 'BACK_%s' % state.foreground_activity}
        if self.search_method == POLICY_NAIVE_DFS:
            views.append(mock_view_back)
        elif self.search_method == POLICY_NAIVE_BFS:
            views.insert(0, mock_view_back)
        # first try to find a preferable view
        for view in views:
            view_text = view['text'] if view['text'] is not None else ''
            view_text = view_text.lower().strip()
            if view_text in self.preferred_buttons \
                    and (state.foreground_activity, view['view_str']) not in self.explored_views:
                self.logger.info("selected an preferred view: %s" % view['view_str'])
                return view
        # try to find a un-clicked view
        for view in views:
            if (state.foreground_activity, view['view_str']) not in self.explored_views:
                self.logger.info("selected an un-clicked view: %s" % view['view_str'])
                return view
        # if all enabled views have been clicked, try jump to another activity by clicking one of state transitions
        if self.random_input:
            random.shuffle(views)
        transition_views = {transition[0] for transition in self.state_transitions}
        for view in views:
            if view['view_str'] in transition_views:
                self.logger.info("selected a transition view: %s" % view['view_str'])
                return view
        # no window transition found, just return a random view
        # view = views[0]
        # self.logger.info("selected a random view: %s" % view['view_str'])
        # return view
        # DroidBot stuck on current state, return None
        self.logger.info("no view could be selected in state: %s" % state.tag)
        return None

    def save_state_transition(self, event_str, old_state, new_state):
        """
        save the state transition
        @param event_str: str, representing the event cause the transition
        @param old_state: DeviceState
        @param new_state: DeviceState
        @return:
        """
        if event_str is None or old_state is None or new_state is None:
            return
        if new_state.is_different_from(old_state):
            self.state_transitions.add((event_str, old_state.tag, new_state.tag))

    def save_explored_view(self, state, view_str):
        """
        save the explored view
        @param state: DeviceState, where the view located
        @param view_str: str, representing a view
        @return:
        """
        if not state:
            return
        state_activity = state.foreground_activity
        self.explored_views.add((state_activity, view_str))
class UtgGreedySearchPolicy(UtgBasedInputPolicy):
    """
    DFS/BFS (according to search_method) strategy to explore UFG (new)
    """

    def __init__(self, device, app, random_input, search_method):
        super(UtgGreedySearchPolicy, self).__init__(device, app, random_input)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.search_method = search_method
        self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access",
                                  "allow", "check", "agree", "try", "go", "next"]
        self.__nav_target = None        # state we are currently navigating towards
        self.__nav_num_steps = -1       # steps remaining of the last planned navigation
        self.__num_restarts = 0
        self.__num_steps_outside = 0
        self.__event_trace = ""         # concatenation of EVENT_FLAG_* markers
        self.__missed_states = set()    # states we failed to navigate to
        self.__random_explore = False

    def generate_event_based_on_utg(self, input_manager):
        """
        generate an event based on current UTG
        :param input_manager: instance of InputManager
        @return: InputEvent
        """
        current_state = self.current_state
        self.logger.info("Current state: %s" % current_state.state_str)
        if current_state.state_str in self.__missed_states:
            self.__missed_states.remove(current_state.state_str)

        if current_state.get_app_activity_depth(self.app) < 0:
            # If the app is not in the activity stack
            start_app_intent = self.app.get_start_intent()

            # It seems the app stucks at some state, has been
            # 1) force stopped (START, STOP)
            #    just start the app again by increasing self.__num_restarts
            # 2) started at least once and cannot be started (START)
            #    pass to let viewclient deal with this case
            # 3) nothing
            #    a normal start. clear self.__num_restarts.

            if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \
                    or self.__event_trace.endswith(EVENT_FLAG_START_APP):
                self.__num_restarts += 1
                self.logger.info("The app had been restarted %d times.", self.__num_restarts)
            else:
                self.__num_restarts = 0

            # pass (START) through
            if not self.__event_trace.endswith(EVENT_FLAG_START_APP):
                if self.__num_restarts > MAX_NUM_RESTARTS:
                    # If the app had been restarted too many times, enter random mode
                    msg = "The app had been restarted too many times. Entering random mode."
                    self.logger.info(msg)
                    self.__random_explore = True
                else:
                    # Start the app
                    self.__event_trace += EVENT_FLAG_START_APP
                    self.logger.info("Trying to start the app...")
                    return IntentEvent(intent=start_app_intent)

        elif current_state.get_app_activity_depth(self.app) > 0:
            # If the app is in activity stack but is not in foreground
            self.__num_steps_outside += 1

            if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:
                # If the app has not been in foreground for too long, try to go back
                if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:
                    stop_app_intent = self.app.get_stop_intent()
                    go_back_event = IntentEvent(stop_app_intent)
                else:
                    go_back_event = KeyEvent(name="BACK")
                self.__event_trace += EVENT_FLAG_NAVIGATE
                self.logger.info("Going back to the app...")
                return go_back_event
        else:
            # If the app is in foreground
            self.__num_steps_outside = 0

        # Get all possible input events
        possible_events = current_state.get_possible_input()

        if self.random_input:
            random.shuffle(possible_events)

        if self.search_method == POLICY_GREEDY_DFS:
            possible_events.append(KeyEvent(name="BACK"))
        elif self.search_method == POLICY_GREEDY_BFS:
            possible_events.insert(0, KeyEvent(name="BACK"))

        # get humanoid result, use the result to sort possible events
        # including back events
        if self.device.humanoid is not None:
            possible_events = self.__sort_inputs_by_humanoid(possible_events)

        # If there is an unexplored event, try the event first
        for input_event in possible_events:
            if not self.utg.is_event_explored(event=input_event, state=current_state):
                self.logger.info("Trying an unexplored event.")
                self.__event_trace += EVENT_FLAG_EXPLORE
                return input_event

        target_state = self.__get_nav_target(current_state)
        if target_state:
            navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state)
            if navigation_steps and len(navigation_steps) > 0:
                self.logger.info("Navigating to %s, %d steps left." % (target_state.state_str, len(navigation_steps)))
                self.__event_trace += EVENT_FLAG_NAVIGATE
                return navigation_steps[0][1]

        if self.__random_explore:
            self.logger.info("Trying random event.")
            random.shuffle(possible_events)
            return possible_events[0]

        # If couldn't find a exploration target, stop the app
        stop_app_intent = self.app.get_stop_intent()
        self.logger.info("Cannot find an exploration target. Trying to restart app...")
        self.__event_trace += EVENT_FLAG_STOP_APP
        return IntentEvent(intent=stop_app_intent)

    def __sort_inputs_by_humanoid(self, possible_events):
        """Ask the humanoid service (over XML-RPC) to rank candidate events."""
        # Bug fix: the version check had an empty body (a syntax error); the
        # dropped lines were the ServerProxy imports, restored here.
        if sys.version.startswith("3"):
            from xmlrpc.client import ServerProxy
        else:
            from xmlrpclib import ServerProxy
        proxy = ServerProxy("http://%s/" % self.device.humanoid)
        request_json = {
            "history_view_trees": self.humanoid_view_trees,
            "history_events": [x.__dict__ for x in self.humanoid_events],
            "possible_events": [x.__dict__ for x in possible_events],
            "screen_res": [self.device.display_info["width"],
                           self.device.display_info["height"]]
        }
        result = json.loads(proxy.predict(json.dumps(request_json)))
        new_idx = result["indices"]
        text = result["text"]
        new_events = []

        # get rid of infinite recursive by randomizing first event
        if not self.utg.is_state_reached(self.current_state):
            new_first = random.randint(0, len(new_idx) - 1)
            new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0]

        for idx in new_idx:
            if isinstance(possible_events[idx], SetTextEvent):
                possible_events[idx].text = text
            new_events.append(possible_events[idx])
        return new_events

    def __get_nav_target(self, current_state):
        """Pick an unexplored, reachable foreground state to navigate towards."""
        # If last event is a navigation event
        if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE):
            navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)
            if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps:
                # If last navigation was successful, use current nav target
                self.__nav_num_steps = len(navigation_steps)
                return self.__nav_target
            else:
                # If last navigation was failed, add nav target to missing states
                self.__missed_states.add(self.__nav_target.state_str)

        reachable_states = self.utg.get_reachable_states(current_state)
        if self.random_input:
            random.shuffle(reachable_states)

        for state in reachable_states:
            # Only consider foreground states
            if state.get_app_activity_depth(self.app) != 0:
                continue
            # Do not consider missed states
            if state.state_str in self.__missed_states:
                continue
            # Do not consider explored states
            if self.utg.is_state_explored(state):
                continue
            self.__nav_target = state
            navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)
            if len(navigation_steps) > 0:
                self.__nav_num_steps = len(navigation_steps)
                return state

        self.__nav_target = None
        self.__nav_num_steps = -1
        return None
class UtgReplayPolicy(InputPolicy):
    """
    Replay DroidBot output generated by UTG policy
    """

    def __init__(self, device, app, replay_output):
        super(UtgReplayPolicy, self).__init__(device, app)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.replay_output = replay_output

        event_dir = os.path.join(replay_output, "events")
        self.event_paths = sorted([os.path.join(event_dir, x) for x in
                                   next(os.walk(event_dir))[2]
                                   if x.endswith(".json")])
        # skip HOME and start app intent
        self.device = device
        self.app = app
        self.event_idx = 2
        self.num_replay_tries = 0
        self.utg = UTG(device=device, app=app, random_input=None)
        self.last_event = None
        self.last_state = None
        self.current_state = None

    def generate_event(self, input_manager=None):
        """
        generate an event based on replay_output
        :param input_manager: instance of InputManager (unused; accepted with a
            default so InputPolicy.start's ``generate_event(input_manager)``
            call no longer raises TypeError)
        @return: InputEvent
        """
        while self.event_idx < len(self.event_paths) and \
                self.num_replay_tries < MAX_REPLY_TRIES:
            self.num_replay_tries += 1
            current_state = self.device.get_current_state()
            if current_state is None:
                time.sleep(5)
                self.num_replay_tries = 0
                return KeyEvent(name="BACK")

            curr_event_idx = self.event_idx
            self.__update_utg()
            while curr_event_idx < len(self.event_paths):
                event_path = self.event_paths[curr_event_idx]
                with open(event_path, "r") as f:
                    curr_event_idx += 1

                    try:
                        event_dict = json.load(f)
                    except Exception:
                        self.logger.info("Loading %s failed" % event_path)
                        continue

                    # only replay events recorded from the current state
                    if event_dict["start_state"] != current_state.state_str:
                        continue
                    if not self.device.is_foreground(self.app):
                        # if current app is in background, bring it to foreground
                        component = self.app.get_package_name()
                        if self.app.get_main_activity():
                            component += "/%s" % self.app.get_main_activity()
                        return IntentEvent(Intent(suffix=component))

                    self.logger.info("Replaying %s" % event_path)
                    self.event_idx = curr_event_idx
                    self.num_replay_tries = 0
                    # return InputEvent.from_dict(event_dict["event"])
                    event = InputEvent.from_dict(event_dict["event"])
                    self.last_state = self.current_state
                    self.last_event = event
                    return event

        time.sleep(5)
        # raise InputInterruptedException("No more record can be replayed.")

    def __update_utg(self):
        # Record the transition caused by the previously replayed event.
        self.utg.add_transition(self.last_event, self.last_state, self.current_state)
class ManualPolicy(UtgBasedInputPolicy):
    """
    manually explore UFG
    """

    def __init__(self, device, app):
        super(ManualPolicy, self).__init__(device, app, False)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.__first_event = True

    def generate_event_based_on_utg(self, input_manager=None):
        """
        generate an event based on current UTG
        :param input_manager: instance of InputManager (unused; accepted with a
            default because UtgBasedInputPolicy.generate_event passes it, which
            previously raised TypeError)
        @return: InputEvent
        """
        if self.__first_event:
            self.__first_event = False
            self.logger.info("Trying to start the app...")
            start_app_intent = self.app.get_start_intent()
            return IntentEvent(intent=start_app_intent)
        else:
            return ManualEvent()
class TaskPolicy(UtgBasedInputPolicy):
    def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):
        # task: natural-language description of the task the LLM should complete
        # use_memory: whether to load a precomputed app "memory" of UI elements
        super(TaskPolicy, self).__init__(device, app, random_input)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.task = task
        self.__nav_target = None        # state currently being navigated to
        self.__nav_num_steps = -1
        self.__num_restarts = 0
        self.__num_steps_outside = 0    # consecutive steps spent outside the app
        self.__event_trace = ""         # concatenation of EVENT_FLAG_* markers
        self.__missed_states = set()
        self.__random_explore = random_input
        self.__action_history = []      # human-readable log of actions taken so far
        self.__thought_history = []     # LLM reasoning recorded alongside each action
        self.use_memory = use_memory
        # if use_memory:
        #     self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)
        if self.use_memory:
            # Look up the memorized element most similar to the task; disable
            # memory if nothing was found for this app.
            self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()
            if not self.similar_ele_function:
                self.use_memory = False
                print('=============\nWarning: Did not find the memory of this app, the app memory is disabled\n=============')
            else:
                print(f'============\nFound element: {self.similar_ele_statement}\nPath: {self.similar_ele_path}\nFunction: {self.similar_ele_function}\n============')
        self.state_ele_memory = {}  # memorize some important states that contain elements of insight
    def get_most_similar_element(self):
        """Find the memorized UI element whose embedded description is most
        similar (cosine similarity) to the task description.

        Reads the app memory JSON files under ``memory/`` and returns a
        ``(path, function_description, element_statement)`` triple, or
        ``(None, None, None)`` when no memory exists for this app.
        """
        # Embed the task with the INSTRUCTOR sentence-embedding model.
        model = INSTRUCTOR('hkunlp/instructor-xl')
        task_embedding = model.encode('task: ' + self.task).reshape(1, -1)
        with open('memory/node_filtered_elements.json') as file:
            ele_statements = json.load(file)
        with open('memory/element_description.json') as file:
            ele_functions = json.load(file)
        with open('memory/embedded_elements_desc.json') as file:
            embeddings = json.load(file)
        # NOTE(review): assumes output_dir's last path component is the app
        # name used as the key in the memory files -- confirm against the
        # memory builder.
        app_name = self.device.output_dir.split('/')[-1]
        if app_name not in embeddings.keys():
            return None, None, None
        app_embeddings = embeddings[app_name]
        # similarities = {}
        max_similarity, similar_ele_idx = -9999, -9999
        for state_str, elements in app_embeddings.items():
            # if the target element is in the first ui, no onclick is needed
            # if ele_statements[app_name][state_str]['path'] == []:
            #     continue
            # similarities[state_str] = []
            for idx, ele in enumerate(elements):
                if ele:
                    npele = np.array(ele).reshape(1, -1)
                    similarity = cosine_similarity(task_embedding, npele)[0][0]
                else:
                    # empty embedding: effectively excluded from the argmax
                    similarity = -9999
                # similarities[state_str].append(similarity)
                if similarity > max_similarity:
                    max_similarity = similarity
                    similar_ele_idx = idx
                    similar_state_str = state_str
        # NOTE(review): if every embedding is empty, similar_state_str is never
        # assigned and the lookups below raise NameError -- confirm whether the
        # memory files guarantee at least one non-empty embedding.
        similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]
        similar_ele_path = ele_statements[app_name][similar_state_str]['path']
        similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]
        # free the (large) embedding model as soon as we are done with it
        del model
        return similar_ele_path, similar_ele_desc, similar_ele
    def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):
        """Scroll *scroller* UP repeatedly (at most MAX_SCROLL_NUM times) until
        no new views appear, recording each transition in the UTG.

        *all_views_for_mark* is mutated in place: every newly revealed view is
        appended so callers can tell when the top has been reached.
        Returns the list of UP ScrollEvents that were sent (used later as a
        prefix to replay before acting on a view found mid-scroll).
        """
        prefix_scroll_event = []
        if old_state is None:
            old_state = self.current_state
        for _ in range(MAX_SCROLL_NUM):  # first scroll up to the top
            self.device.send_event(ScrollEvent(view=scroller, direction="UP"))
            scrolled_state = self.device.get_current_state()
            self.utg.add_transition(ScrollEvent(view=scroller, direction="UP"), old_state, scrolled_state)
            old_state = scrolled_state
            state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()
            scrolled_new_views = []  # judge whether there is a new view after scrolling
            for scrolled_view in scrolled_views:
                if scrolled_view not in all_views_for_mark:
                    scrolled_new_views.append(scrolled_view)
                    all_views_for_mark.append(scrolled_view)
            if len(scrolled_new_views) == 0:
                # nothing new appeared: we are at the top
                break
            prefix_scroll_event.append(ScrollEvent(view=scroller, direction="UP"))
        return prefix_scroll_event
    def generate_event_based_on_utg(self, input_manager):
        """
        generate an event based on current UTG
        :param input_manager: instance of InputManager
        @return: an (old_state, InputEvent) tuple, or (None, FINISHED) when the
            LLM judges the task complete
        """
        current_state = self.current_state
        self.logger.info("Current state: %s" % current_state.state_str)
        if current_state.state_str in self.__missed_states:
            self.__missed_states.remove(current_state.state_str)
        if current_state.get_app_activity_depth(self.app) < 0:
            # If the app is not in the activity stack
            start_app_intent = self.app.get_start_intent()
            # It seems the app stucks at some state, has been
            # 1) force stopped (START, STOP)
            #    just start the app again by increasing self.__num_restarts
            # 2) started at least once and cannot be started (START)
            #    pass to let viewclient deal with this case
            # 3) nothing
            #    a normal start. clear self.__num_restarts.
            if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \
                    or self.__event_trace.endswith(EVENT_FLAG_START_APP):
                self.__num_restarts += 1
                self.logger.info("The app had been restarted %d times.", self.__num_restarts)
            else:
                self.__num_restarts = 0
            # pass (START) through
            if not self.__event_trace.endswith(EVENT_FLAG_START_APP):
                if self.__num_restarts > MAX_NUM_RESTARTS:
                    # If the app had been restarted too many times, enter random mode
                    msg = "The app had been restarted too many times. Entering random mode."
                    self.logger.info(msg)
                    self.__random_explore = True
                else:
                    # Start the app
                    self.__event_trace += EVENT_FLAG_START_APP
                    self.logger.info("Trying to start the app...")
                    # self.__action_history = [f'- start the app {self.app.app_name}']
                    # launching resets the action/thought histories
                    self.__action_history = [f'- launchApp {self.app.app_name}']
                    self.__thought_history = [f'launch the app {self.app.app_name} to finish the task {self.task}']
                    return None, IntentEvent(intent=start_app_intent)
        elif current_state.get_app_activity_depth(self.app) > 0:
            # If the app is in activity stack but is not in foreground
            self.__num_steps_outside += 1
            if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:
                # If the app has not been in foreground for too long, try to go back
                if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:
                    stop_app_intent = self.app.get_stop_intent()
                    go_back_event = IntentEvent(stop_app_intent)
                else:
                    go_back_event = KeyEvent(name="BACK")
                self.__event_trace += EVENT_FLAG_NAVIGATE
                self.logger.info("Going back to the app...")
                self.__action_history.append('- go back')
                self.__thought_history.append('the app has not been in foreground for too long, try to go back')
                return None, go_back_event
        else:
            # If the app is in foreground
            self.__num_steps_outside = 0
        scrollable_views = current_state.get_scrollable_views()#self._get_scrollable_views(current_state)
        if len(scrollable_views) > 0:
            '''
            if there is at least one scroller in the screen, we scroll each scroller many times until all the screens after scrolling have been recorded, you do not need to read
            '''
            # print(scrollable_views)
            actions_dict = {}
            whole_state_views, whole_state_actions, whole_state_strs = [], [], []
            # state_strs = [current_state.state_str]
            state_prompt, current_candidate_actions, current_views, _ = current_state.get_described_actions()
            all_views_for_mark = copy.deepcopy(current_views)  # just for judging whether the screen has been scrolled up to the top
            for scrollerid in range(len(scrollable_views)):
                scroller = scrollable_views[scrollerid]
                # prefix_scroll_event = []
                actions_dict[scrollerid] = []
                prefix_scroll_event = self._scroll_to_top(scroller, all_views_for_mark)
                # after scrolling to the top, update the current_state
                top_state = self.device.get_current_state()
                state_prompt, top_candidate_actions, top_views, _ = top_state.get_described_actions()
                all_views_without_id, all_actions = top_views, top_candidate_actions
                too_few_item_time = 0
                for _ in range(MAX_SCROLL_NUM):  # then scroll down to the bottom
                    whole_state_strs.append(top_state.state_str)  # record the states from the top to the bottom
                    self.device.send_event(ScrollEvent(view=scroller, direction="DOWN"))
                    scrolled_state = self.device.get_current_state()
                    state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()
                    scrolled_new_views = []
                    for scrolled_view_id in range(len(scrolled_views)):
                        scrolled_view = scrolled_views[scrolled_view_id]
                        if scrolled_view not in all_views_without_id:
                            scrolled_new_views.append(scrolled_view)
                            all_views_without_id.append(scrolled_view)
                            # the action for a mid-scroll view is the scroll
                            # prefix followed by the view's own action
                            all_actions.append(prefix_scroll_event + [ScrollEvent(view=scroller, direction="DOWN"), scrolled_candidate_actions[scrolled_view_id]])
                    # print('found new views:', scrolled_new_views)
                    if len(scrolled_new_views) == 0:
                        break
                    prefix_scroll_event.append(ScrollEvent(view=scroller, direction="DOWN"))
                    if len(scrolled_new_views) < 2:
                        too_few_item_time += 1
                    if too_few_item_time >= 2:
                        break
                    self.utg.add_transition(ScrollEvent(view=scroller, direction="DOWN"), top_state, scrolled_state)
                    top_state = scrolled_state
                # filter out the views that have been added to the whole_state by scrolling other scrollers
                for all_view_id in range(len(all_views_without_id)):
                    view = all_views_without_id[all_view_id]
                    if view not in whole_state_views:
                        whole_state_views.append(view)
                        whole_state_actions.append(all_actions[all_view_id])
                all_views_for_mark = []
                _ = self._scroll_to_top(scroller, all_views_for_mark, top_state)
            # print(whole_state_views)
            action, candidate_actions, target_view, thought = self._get_action_from_views_actions(
                views=whole_state_views, candidate_actions=whole_state_actions, state_strs=whole_state_strs, action_history=self.__action_history, thought_history=self.__thought_history)
            if isinstance(action, list):  # the screen has to be scrolled first
                last_state = None
                for eventid in range(len(action) - 1):
                    self.device.send_event(action[eventid])
                    last_state = self.device.get_current_state()
                # self.__action_history.append(current_state.get_action_desc(action[eventid]))
                self.__action_history.append(current_state.get_action_descv2(action[-1], target_view))
                self.__thought_history.append(thought)
                return last_state, action[-1]
            '''
            end for dealing with scrollers
            '''
        else:
            action, candidate_actions, target_view, thought = self._get_action_from_views_actions(
                current_state=current_state, action_history=self.__action_history, thought_history=self.__thought_history, state_strs=current_state.state_str)
        # FINISHED sentinel: the LLM decided the task is complete
        if action == FINISHED:
            return None, FINISHED
        if action is not None:
            self.__action_history.append(current_state.get_action_descv2(action, target_view))
            self.__thought_history.append(thought)
            return None, action
        if self.__random_explore:
            self.logger.info("Trying random event.")
            action = random.choice(candidate_actions)
            self.__action_history.append(current_state.get_action_descv2(action, target_view))
            self.__thought_history.append('random trying')
            return None, action
        # If couldn't find a exploration target, stop the app
        stop_app_intent = self.app.get_stop_intent()
        self.logger.info("Cannot find an exploration target. Trying to restart app...")
        self.__action_history.append('- stop the app')
        self.__thought_history.append("couldn't find a exploration target, stop the app")
        self.__event_trace += EVENT_FLAG_STOP_APP
        return None, IntentEvent(intent=stop_app_intent)
def _save2yaml(self, file_name, state_prompt, idx, state_str, inputs='null'):
if not os.path.exists(file_name):
tmp_data = {
'task_name': self.task,
'step_num': 0,
'records': []
}
with open(file_name, 'w', encoding='utf-8') as f:
yaml.dump(tmp_data, f)
with open(file_name, 'r', encoding='utf-8') as f:
old_yaml_data = yaml.safe_load(f)
new_records = old_yaml_data['records']
new_records.append(
{'State': state_prompt,
'Choice': idx,
'Input': inputs,
'state_str': state_str}
)
# import pdb;pdb.set_trace()
data = {
'task_name': self.task,
'step_num': len(list(old_yaml_data['records'])),
'records': new_records
}
with open(file_name, 'w', encoding='utf-8') as f:
yaml.dump(data, f)
def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):
if self.use_memory:
# if isinstance(state_str, list):
# if len(state_str) == 1:
# state_str = state_str[0]
# else:
# state_str = self.memory.hash_state(state_prompt)
# new_state_prompt = self.f(action_history, state_prompt, state_str)
# if new_state_prompt !z= None and new_state_prompt != 'no_description':
# state_prompt = new_state_prompt
if len(action_history) <= len(self.similar_ele_path):
current_ui_id = len(action_history) - 1
new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)
if new_state_prompt != state_prompt: # current state contains an element of insight
self.state_ele_memory[state_str] = new_state_prompt
state_prompt = new_state_prompt
# elif state_str in self.state_ele_memory.keys():
# state_prompt = self.state_ele_memory[state_str]
if use_thoughts:
history_with_thought = []
for idx in range(len(action_history)):
history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])
else:
history_with_thought = action_history
return '\n'.join(history_with_thought),state_prompt
def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):
if self.use_memory:
# if isinstance(state_str, list):
# if len(state_str) == 1:
# state_str = state_str[0]
# else:
# state_str = self.memory.hash_state(state_prompt)
# new_state_prompt = self.f(action_history, state_prompt, state_str)
# if new_state_prompt !z= None and new_state_prompt != 'no_description':
# state_prompt = new_state_prompt
if len(action_history) <= len(self.similar_ele_path):
current_ui_id = len(action_history) - 1
new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)
if new_state_prompt != state_prompt: # current state contains an element of insight
self.state_ele_memory[state_str] = new_state_prompt
state_prompt = new_state_prompt
# elif state_str in self.state_ele_memory.keys():
# state_prompt = self.state_ele_memory[state_str]
if use_thoughts:
history_with_thought = []
for idx in range(len(action_history)):
history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])
else:
history_with_thought = action_history
introduction = '''You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.'''
task_prompt = 'Task: ' + self.task
history_prompt = 'Previous UI actions: \n' + '\n'.join(history_with_thought)
full_state_prompt = 'Current UI state: \n' + state_prompt
request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>'''
prompt = introduction + '\n' + task_prompt + '\n' + history_prompt + '\n' + full_state_prompt + '\n' + request_prompt
return prompt
def _extract_input_text(self, string, start='Text: ', end=' Thought'):
start_index = string.find(start) + len(start) # Find the location of 'start'
if start_index == -1:
start_index = 0
end_index = string.find(end) # Find the location of 'end'
substring = string[start_index:end_index] if end_index != -1 else string[start_index:]
return substring
def _extract_input_textv2(self, string):
if string[:11] == 'InputText: ':
return string[11:]
else:
return string
def _get_text_view_description(self, view):
content_description = safe_dict_get(view, 'content_description', default='')
view_text = safe_dict_get(view, 'text', default='')
view_desc = f"<input class='&'>#</input>"#.replace('&', view_class)#.replace('#', text)
if view_text:
view_desc = view_desc.replace('#', view_text)
else:
view_desc = view_desc.replace('#', '')
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
return view_desc
def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None):
'''
get action choice from LLM based on a list of views and corresponding actions
'''
if current_state:
state_prompt, candidate_actions, _, _ = current_state.get_described_actions()
state_str = current_state.state_str
if USE_LMQL:
history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,
thought_history=thought_history)
else:
prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)
else:
views_with_id = []
for id in range(len(views)):
views_with_id.append(tools.insert_id_into_view(views[id], id))
state_prompt = '\n'.join(views_with_id)
state_str = tools.hash_string(state_prompt)
if USE_LMQL:
history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,
thought_history=thought_history)
else:
prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)
# ids = [str(idx) for idx, i in enumerate(candidate_actions)]
ids = str([i for i in range(len(candidate_actions))])
if USE_LMQL: | idx, action_type, input_text=prompt_llm_with_history(task=self.task, history=history, ui_desc=state_prompt, ids=ids) | 2 | 2023-10-23 03:32:58+00:00 | 12k |
cvlab-yonsei/ACLS | calibrate/evaluation/calibrate_evaluator.py | [
{
"identifier": "DatasetEvaluator",
"path": "calibrate/evaluation/evaluator.py",
"snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evalu... | import logging
import numpy as np
import torch
import torch.nn.functional as F
from terminaltables import AsciiTable
from torch import nn
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy | 8,737 |
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
    def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
        """Calibration evaluator that accumulates logits/labels across batches.

        Args:
            num_classes: number of classification classes.
            num_bins: number of confidence bins used by the ECE-style metrics.
            device: torch device string the metric criteria are moved to.
        """
        self.num_classes = num_classes
        self.num_bins = num_bins
        self.device = device
        self.reset()  # start with empty logits/labels buffers
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
columns = ["samples", "nll", "ece", "aece", "cece"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def plot_reliability_diagram(self, title=""):
|
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
    def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
        """Calibration evaluator that accumulates logits/labels across batches.

        Args:
            num_classes: number of classification classes.
            num_bins: number of confidence bins used by the ECE-style metrics.
            device: torch device string the metric criteria are moved to.
        """
        self.num_classes = num_classes
        self.num_bins = num_bins
        self.device = device
        self.reset()  # start with empty logits/labels buffers
    def reset(self) -> None:
        """Drop all accumulated logits and labels."""
        self.logits = None
        self.labels = None
    def num_samples(self):
        """Number of samples accumulated so far (0 before any update)."""
        return (
            self.labels.shape[0]
            if self.labels is not None
            else 0
        )
def main_metric(self) -> None:
return "ece"
    def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
        """Accumulate one batch of predictions.

        Args:
            logits (torch.Tensor): n x num_classes raw model outputs.
            labels (torch.Tensor): n ground-truth targets.
        """
        assert logits.shape[0] == labels.shape[0]
        if self.logits is None:
            # First batch: adopt the tensors directly.
            self.logits = logits
            self.labels = labels
        else:
            # Later batches: append along the sample dimension.
            self.logits = torch.cat((self.logits, logits), dim=0)
            self.labels = torch.cat((self.labels, labels), dim=0)
    def mean_score(self, print=False, all_metric=True):
        """Compute NLL/ECE/AECE/CECE over all accumulated samples.

        Args:
            print: when True, log the metrics as an ASCII table.
                   (The name shadows the builtin; kept for interface compatibility.)
            all_metric: when True return (metric_dict, table_data),
                        otherwise return only the main metric's value.
        """
        nll_criterion = nn.CrossEntropyLoss().to(self.device)
        ece_criterion = ECELoss(self.num_bins).to(self.device)
        aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
        cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
        nll = nll_criterion(self.logits, self.labels).item()
        ece = ece_criterion(self.logits, self.labels).item()
        aece = aece_criterion(self.logits, self.labels).item()
        cece = cece_criterion(self.logits, self.labels).item()
        metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
        columns = ["samples", "nll", "ece", "aece", "cece"]
        table_data = [columns]
        table_data.append(
            [
                self.num_samples(),
                "{:.5f}".format(nll),
                "{:.5f}".format(ece),
                "{:.5f}".format(aece),
                "{:.5f}".format(cece),
            ]
        )
        if print:
            table = AsciiTable(table_data)
            logger.info("\n" + table.table)
        if all_metric:
            return metric, table_data
        else:
            return metric[self.main_metric()]
def plot_reliability_diagram(self, title=""): | diagram = ReliabilityDiagram(bins=25, style="curve") | 4 | 2023-10-23 09:55:13+00:00 | 12k |
myshell-ai/AIlice | ailice/AIliceMain.py | [
{
"identifier": "config",
"path": "ailice/common/AConfig.py",
"snippet": "class AConfig():\n def __init__(self):\n def Initialize(self, needOpenaiGPTKey = False):\n def Load(self, configFile: str) -> dict:\n def Store(self, configFile: str):"
},
{
"identifier": "AProcessor",
"pat... | import time
import simplejson as json
import argparse
from termcolor import colored
from ailice.common.AConfig import config
from ailice.core.AProcessor import AProcessor
from ailice.core.llm.ALLMPool import llmPool
from ailice.common.utils.ALogger import ALogger
from ailice.common.ARemoteAccessors import clientPool
from ailice.AServices import StartServices
from ailice.common.APrompts import promptsManager
from ailice.prompts.APromptChat import APromptChat
from ailice.prompts.APromptMain import APromptMain
from ailice.prompts.APromptSearchEngine import APromptSearchEngine
from ailice.prompts.APromptResearcher import APromptResearcher
from ailice.prompts.APromptCoder import APromptCoder
from ailice.prompts.APromptModuleCoder import APromptModuleCoder
from ailice.prompts.APromptModuleLoader import APromptModuleLoader
from ailice.prompts.APromptCoderProxy import APromptCoderProxy
from ailice.prompts.APromptArticleDigest import APromptArticleDigest | 8,820 |
def GetInput(speech) -> str:
    """Read one user utterance: speech recognition when enabled, stdin otherwise."""
    user_tag = colored("USER: ", "green")
    if not config.speechOn:
        return input(user_tag)
    # Speech path: show the prompt, capture audio, then echo the transcript.
    print(user_tag, end="", flush=True)
    utterance = speech.GetAudio()
    print(utterance, end="", flush=True)
    print("")
    return utterance
def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, speechOn: bool, ttsDevice: str, sttDevice: str, contextWindowRatio: float, localExecution: bool, trace: str):
    """Entry point: configure globals, start services, then run the chat loop.

    NOTE(review): only the configuration/startup prefix of this function is
    visible here; `prompt` and `trace` are presumably consumed further down —
    confirm against the full source.
    """
    # Only require an OpenAI key when an OpenAI ("oai:...") model is selected.
    config.Initialize(needOpenaiGPTKey = ("oai:" in modelID))
    config.quantization = quantization
    config.maxMemory = maxMemory
    config.temperature = temperature
    config.flashAttention2 = flashAttention2
    config.speechOn = speechOn
    config.contextWindowRatio = contextWindowRatio
    config.localExecution = localExecution
    print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow"))
    StartServices()
    clientPool.Init()
    if speechOn:
        speech = clientPool.GetClient(config.services['speech']['addr'])
        # Validate the TTS/STT device strings before configuring the speech service.
        if (ttsDevice not in {'cpu','cuda'}) or (sttDevice not in {'cpu','cuda'}):
            print("the value of ttsDevice and sttDevice should be one of cpu or cuda, the default is cpu.")
            exit(-1)
        else:
            speech.SetDevices({"tts": ttsDevice, "stt": sttDevice})
    else:
        speech = None
|
def GetInput(speech) -> str:
    """Read one user utterance: speech recognition when enabled, stdin otherwise."""
    if config.speechOn:
        # Speech path: show the prompt, capture audio, then echo the transcript.
        print(colored("USER: ", "green"), end="", flush=True)
        inp = speech.GetAudio()
        print(inp, end="", flush=True)
        print("")
    else:
        inp = input(colored("USER: ", "green"))
    return inp
def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, speechOn: bool, ttsDevice: str, sttDevice: str, contextWindowRatio: float, localExecution: bool, trace: str):
config.Initialize(needOpenaiGPTKey = ("oai:" in modelID))
config.quantization = quantization
config.maxMemory = maxMemory
config.temperature = temperature
config.flashAttention2 = flashAttention2
config.speechOn = speechOn
config.contextWindowRatio = contextWindowRatio
config.localExecution = localExecution
print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow"))
StartServices()
clientPool.Init()
if speechOn:
speech = clientPool.GetClient(config.services['speech']['addr'])
if (ttsDevice not in {'cpu','cuda'}) or (sttDevice not in {'cpu','cuda'}):
print("the value of ttsDevice and sttDevice should be one of cpu or cuda, the default is cpu.")
exit(-1)
else:
speech.SetDevices({"tts": ttsDevice, "stt": sttDevice})
else:
speech = None
| for promptCls in [APromptChat, APromptMain, APromptSearchEngine, APromptResearcher, APromptCoder, APromptModuleCoder, APromptModuleLoader, APromptCoderProxy, APromptArticleDigest]: | 10 | 2023-10-16 01:51:14+00:00 | 12k |
city96/ComfyUI_ExtraModels | PixArt/models/PixArtMS.py | [
{
"identifier": "auto_grad_checkpoint",
"path": "PixArt/models/utils.py",
"snippet": "def _ntuple(n):\n def parse(x):\ndef set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1):\n def set_attr(module):\ndef auto_grad_checkpoint(module, *args, **kwargs):\ndef checkpoint_sequential(functio... | import torch
import torch.nn as nn
from tqdm import tqdm
from timm.models.layers import DropPath
from timm.models.vision_transformer import Mlp
from .utils import auto_grad_checkpoint, to_2tuple
from .PixArt_blocks import t2i_modulate, CaptionEmbedder, WindowAttention, MultiHeadCrossAttention, T2IFinalLayer, TimestepEmbedder, SizeEmbedder
from .PixArt import PixArt, get_2d_sincos_pos_embed
| 7,634 | use_rel_pos=use_rel_pos, **block_kwargs)
self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)
self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
# to be compatible with lower version pytorch
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.window_size = window_size
self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)
    def forward(self, x, y, t, mask=None, **kwargs):
        """Apply one adaLN-Zero transformer block.

        x is a (B, N, C) token sequence (per the unpack below); y is the
        cross-attention context; t carries the conditioning from which the six
        modulation tensors are derived.
        """
        B, N, C = x.shape
        # Learned table + conditioning, split into shift/scale/gate triples for
        # the attention and MLP branches (6 chunks along dim 1).
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1)
        # Self-attention branch: modulated, gated, with stochastic depth.
        x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)))
        # Cross-attention over the context tokens y (mask limits valid tokens).
        x = x + self.cross_attn(x, y, mask)
        # Feed-forward branch: modulated, gated, with stochastic depth.
        x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)))
        return x
#############################################################################
# Core PixArt Model #
#################################################################################
class PixArtMS(PixArt):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
input_size=32,
patch_size=2,
in_channels=4,
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
learn_sigma=True,
pred_sigma=True,
drop_path: float = 0.,
window_size=0,
window_block_indexes=[],
use_rel_pos=False,
caption_channels=4096,
lewei_scale=1.,
config=None,
**kwargs,
):
super().__init__(
input_size=input_size,
patch_size=patch_size,
in_channels=in_channels,
hidden_size=hidden_size,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
class_dropout_prob=class_dropout_prob,
learn_sigma=learn_sigma,
pred_sigma=pred_sigma,
drop_path=drop_path,
window_size=window_size,
window_block_indexes=window_block_indexes,
use_rel_pos=use_rel_pos,
lewei_scale=lewei_scale,
config=config,
**kwargs,
)
self.dtype = torch.get_default_dtype()
self.h = self.w = 0
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.t_block = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size, 6 * hidden_size, bias=True)
)
self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)
self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)
self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed
self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed
drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],
input_size=(input_size // patch_size, input_size // patch_size),
window_size=window_size if i in window_block_indexes else 0,
use_rel_pos=use_rel_pos if i in window_block_indexes else False)
for i in range(depth)
])
self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)
self.training = False
self.initialize()
    def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs):
        """
        Original forward pass of PixArt.
        x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
        t: (N,) tensor of diffusion timesteps
        y: (N, 1, 120, C) tensor of class labels
        """
        bs = x.shape[0]
        # Per-sample conditioning: target image size and aspect ratio.
        c_size, ar = data_info['img_hw'], data_info['aspect_ratio']
        self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size
        # Positional embedding is recomputed for the current latent grid size.
        pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype)
        x = self.x_embedder(x) + pos_embed  # (N, T, D), where T = H * W / patch_size ** 2
        t = self.t_embedder(t)  # (N, D)
        csize = self.csize_embedder(c_size, bs)  # (N, D)
        ar = self.ar_embedder(ar, bs)  # (N, D)
        # Fold size/aspect-ratio conditioning into the timestep embedding.
        t = t + torch.cat([csize, ar], dim=1)
        t0 = self.t_block(t)
        y = self.y_embedder(y, self.training)  # (N, D)
        if mask is not None:
            # Keep only valid caption tokens; y_lens records per-sample lengths.
            if mask.shape[0] != y.shape[0]:
                mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
            mask = mask.squeeze(1).squeeze(1)
            y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])
            y_lens = mask.sum(dim=1).tolist()
        else:
            y_lens = [y.shape[2]] * y.shape[0]
            y = y.squeeze(1).view(1, -1, x.shape[-1])
        for block in self.blocks:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# GLIDE: https://github.com/openai/glide-text2im
# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------
class PatchEmbed(nn.Module):
    """2D image-to-patch embedding.

    Patchifies with a strided Conv2d, optionally flattens BCHW -> BNC, and
    applies an optional normalization layer.
    """

    def __init__(
            self,
            patch_size=16,
            in_chans=3,
            embed_dim=768,
            norm_layer=None,
            flatten=True,
            bias=True,
    ):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.flatten = flatten
        # Non-overlapping patches: kernel size == stride == patch size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        patches = self.proj(x)
        if self.flatten:
            patches = patches.flatten(2).transpose(1, 2)  # BCHW -> BNC
        return self.norm(patches)
class PixArtMSBlock(nn.Module):
    """
    A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning.
    """

    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs):
        super().__init__()
        self.hidden_size = hidden_size
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # Windowed self-attention when window_size > 0, global otherwise.
        self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,
                                    input_size=input_size if window_size == 0 else (window_size, window_size),
                                    use_rel_pos=use_rel_pos, **block_kwargs)
        self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # to be compatible with lower version pytorch
        approx_gelu = lambda: nn.GELU(approximate="tanh")
        self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.window_size = window_size
        # Learned per-block shift/scale/gate table for the 6 modulation tensors.
        self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)

    def forward(self, x, y, t, mask=None, **kwargs):
        """Apply the block: modulated self-attn, cross-attn over y, then MLP."""
        B, N, C = x.shape
        # Split table + conditioning into shift/scale/gate triples (6 chunks).
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1)
        x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)))
        x = x + self.cross_attn(x, y, mask)
        x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)))
        return x
#############################################################################
# Core PixArt Model #
#################################################################################
class PixArtMS(PixArt):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
input_size=32,
patch_size=2,
in_channels=4,
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
learn_sigma=True,
pred_sigma=True,
drop_path: float = 0.,
window_size=0,
window_block_indexes=[],
use_rel_pos=False,
caption_channels=4096,
lewei_scale=1.,
config=None,
**kwargs,
):
super().__init__(
input_size=input_size,
patch_size=patch_size,
in_channels=in_channels,
hidden_size=hidden_size,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
class_dropout_prob=class_dropout_prob,
learn_sigma=learn_sigma,
pred_sigma=pred_sigma,
drop_path=drop_path,
window_size=window_size,
window_block_indexes=window_block_indexes,
use_rel_pos=use_rel_pos,
lewei_scale=lewei_scale,
config=config,
**kwargs,
)
self.dtype = torch.get_default_dtype()
self.h = self.w = 0
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.t_block = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size, 6 * hidden_size, bias=True)
)
self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)
self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)
self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed
self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed
drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],
input_size=(input_size // patch_size, input_size // patch_size),
window_size=window_size if i in window_block_indexes else 0,
use_rel_pos=use_rel_pos if i in window_block_indexes else False)
for i in range(depth)
])
self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)
self.training = False
self.initialize()
def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs):
"""
Original forward pass of PixArt.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N, 1, 120, C) tensor of class labels
"""
bs = x.shape[0]
c_size, ar = data_info['img_hw'], data_info['aspect_ratio']
self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size
pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype)
x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2
t = self.t_embedder(t) # (N, D)
csize = self.csize_embedder(c_size, bs) # (N, D)
ar = self.ar_embedder(ar, bs) # (N, D)
t = t + torch.cat([csize, ar], dim=1)
t0 = self.t_block(t)
y = self.y_embedder(y, self.training) # (N, D)
if mask is not None:
if mask.shape[0] != y.shape[0]:
mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
mask = mask.squeeze(1).squeeze(1)
y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])
y_lens = mask.sum(dim=1).tolist()
else:
y_lens = [y.shape[2]] * y.shape[0]
y = y.squeeze(1).view(1, -1, x.shape[-1])
for block in self.blocks:
| x = auto_grad_checkpoint(block, x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint
| 0 | 2023-10-20 21:19:44+00:00 | 12k |
apple/ml-nvas3d | demo/generate_demo_video.py | [
{
"identifier": "convolve_moving_receiver",
"path": "nvas3d/utils/dynamic_utils.py",
"snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution betw... | import os
import json
import argparse
import itertools
import subprocess
import typing as T
import torch
import imageio
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import *
from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp
from nvas3d.utils.audio_utils import clip_two, clip_all
from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene | 9,182 | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def normalize(input: torch.Tensor) -> torch.Tensor:
    """Linearly rescale `input` to the range [-1, 1].

    Args:
        input: Tensor of any shape. (Name shadows the builtin; kept for
               interface compatibility.)

    Returns:
        Tensor of the same shape with min -> -1 and max -> 1. A constant
        tensor maps to all zeros (the old code divided by zero and
        produced NaNs in that case).
    """
    span = input.max() - input.min()
    if span == 0:
        # Degenerate (constant) input: map to the midpoint of the range.
        return torch.zeros_like(input)
    output = (input - input.min()) / span
    output = 2 * output - 1
    return output
def configure_scene_from_metadata(
    metadata: T.Dict[str, T.Any],
    image_size: T.Tuple[int, int] = (1000, 1000),
    hfov: float = 90.0,
    use_placeholder_mesh: bool = False
) -> Scene:
    """
    Configures a scene using the provided metadata.

    Args:
    - metadata: Dictionary containing room and grid point information.
    - image_size: The size of the rendered image.
    - hfov: Horizontal field of view.
    - use_placeholder_mesh: Flag to determine if placeholder meshes should be used.

    Returns:
    - Configured scene object.
    """

    # NOTE(review): metadata values are indexed with [0], which suggests
    # batch-wrapped entries (e.g. from a DataLoader) — confirm against callers.
    room = metadata['room'][0]
    grid_points_source = metadata['grid_points'][0]
    source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()]
    # Only the first four receivers are given placeholder microphone meshes.
    receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4]

    scene = create_scene(room, image_size=image_size, hfov=hfov)

    if use_placeholder_mesh:
        # Add placeholder mesh for sources and receivers to the scene
        # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb:
        # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/).
        # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/)
        # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/).
        # - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/)
        ss_source1 = Source(
            position=grid_points_source[source_idx_list[0]],
            rotation=0,
            dry_sound='',
            mesh='bluetooth_speaker',  # Need mesh object
            device=torch.device('cpu')
        )
        ss_source2 = Source(
            position=grid_points_source[source_idx_list[1]],
            rotation=-90,
            dry_sound='',
            mesh='bluetooth_speaker',  # Need mesh object
            device=torch.device('cpu')
        )
        ss_mic_list = [
            Source(
                position=grid_points_source[idx],
                rotation=180,
                dry_sound='',
                mesh='classic_microphone',  # Need mesh object
                device=torch.device('cpu')
            ) for idx in receiver_idx_list_original
        ]

        scene.add_source_mesh = True
        # Slots 0-1 hold the two sources; microphone meshes follow at 2+.
        scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original))
        scene.update_source(ss_source1, 0)
        scene.update_source(ss_source2, 1)
        for m, mic in enumerate(ss_mic_list):
            scene.update_source(mic, m + 2)

    return scene
def interpolate_moving_audio(
source1_audio: torch.Tensor,
source2_audio: torch.Tensor,
ir1_list: T.List[torch.Tensor],
ir2_list: T.List[torch.Tensor],
receiver_position: torch.Tensor
) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Interpolates audio for a moving receiver.
Args:
- source1_audio: First source audio.
- source2_audio: Second source audio.
- ir1_list: List of impulse responses for source 1.
- ir2_list: List of impulse responses for source 2.
- receiver_position: Positions of the moving receiver.
Returns:
- Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2.
"""
# Prepare for interpolation
audio_len = source1_audio.shape[-1]
interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len)
# Generate audio for moving receiver
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def normalize(input: torch.Tensor) -> torch.Tensor:
    """Linearly rescale `input` to the range [-1, 1].

    Args:
        input: Tensor of any shape. (Name shadows the builtin; kept for
               interface compatibility.)

    Returns:
        Tensor of the same shape with min -> -1 and max -> 1. A constant
        tensor maps to all zeros (the old code divided by zero and
        produced NaNs in that case).
    """
    span = input.max() - input.min()
    if span == 0:
        # Degenerate (constant) input: map to the midpoint of the range.
        return torch.zeros_like(input)
    output = (input - input.min()) / span
    output = 2 * output - 1
    return output
def configure_scene_from_metadata(
    metadata: T.Dict[str, T.Any],
    image_size: T.Tuple[int, int] = (1000, 1000),
    hfov: float = 90.0,
    use_placeholder_mesh: bool = False
) -> Scene:
    """
    Configures a scene using the provided metadata.
    Args:
    - metadata: Dictionary containing room and grid point information.
      Fields are batched; only the first batch element ([0]) is used.
    - image_size: The size of the rendered image.
    - hfov: Horizontal field of view.
    - use_placeholder_mesh: Flag to determine if placeholder meshes should be used.
    Returns:
    - Configured scene object.
    """
    # Unbatch: every metadata field is indexed at [0].
    room = metadata['room'][0]
    grid_points_source = metadata['grid_points'][0]
    source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()]
    # Only the first 4 receiver positions get placeholder microphones below.
    receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4]
    scene = create_scene(room, image_size=image_size, hfov=hfov)
    if use_placeholder_mesh:
        # Add placeholder mesh for sources and receivers to the scene
        # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb:
        # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/).
        # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/)
        # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/).
        # - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/)
        # Speaker meshes stand in for the two sources (rotation presumably in
        # degrees — confirm against the Source implementation).
        ss_source1 = Source(
            position=grid_points_source[source_idx_list[0]],
            rotation=0,
            dry_sound='',
            mesh='bluetooth_speaker',  # Need mesh object
            device=torch.device('cpu')
        )
        ss_source2 = Source(
            position=grid_points_source[source_idx_list[1]],
            rotation=-90,
            dry_sound='',
            mesh='bluetooth_speaker',  # Need mesh object
            device=torch.device('cpu')
        )
        # Microphone meshes mark the (up to 4) receiver grid positions.
        ss_mic_list = [
            Source(
                position=grid_points_source[idx],
                rotation=180,
                dry_sound='',
                mesh='classic_microphone',  # Need mesh object
                device=torch.device('cpu')
            ) for idx in receiver_idx_list_original
        ]
        scene.add_source_mesh = True
        # Slot layout in scene.source_list: [source1, source2, mic_0..mic_3].
        scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original))
        scene.update_source(ss_source1, 0)
        scene.update_source(ss_source2, 1)
        for m, mic in enumerate(ss_mic_list):
            scene.update_source(mic, m + 2)
    return scene
def interpolate_moving_audio(
source1_audio: torch.Tensor,
source2_audio: torch.Tensor,
ir1_list: T.List[torch.Tensor],
ir2_list: T.List[torch.Tensor],
receiver_position: torch.Tensor
) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Interpolates audio for a moving receiver.
Args:
- source1_audio: First source audio.
- source2_audio: Second source audio.
- ir1_list: List of impulse responses for source 1.
- ir2_list: List of impulse responses for source 2.
- receiver_position: Positions of the moving receiver.
Returns:
- Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2.
"""
# Prepare for interpolation
audio_len = source1_audio.shape[-1]
interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len)
# Generate audio for moving receiver | receiver_audio_1 = convolve_moving_receiver(source1_audio.numpy()[0], ir1_list.numpy(), interp_index, interp_weight) | 0 | 2023-10-19 05:35:54+00:00 | 12k |
tiejundong/FlexPose | FlexPose/utils/prediction.py | [
{
"identifier": "FlexPose",
"path": "FlexPose/model/layers.py",
"snippet": "class FlexPose(torch.nn.Module):\n def __init__(self, args=None, param_path=None):\n super(FlexPose, self).__init__()\n if args is not None:\n self.init_param(args)\n else:\n self.in... | import os
import shutil
import sys
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import pyrosetta
import pyrosetta
from biopandas.pdb import PandasPdb
from collections import defaultdict
from ray.util.multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from einops import rearrange, repeat
from torch_scatter import scatter_min, scatter_add
from FlexPose.model.layers import FlexPose
from FlexPose.utils.common import *
from FlexPose.preprocess.prepare_for_training import try_prepare_task
from FlexPose.utils.APOPDBbind_data import pred_ens
from FlexPose.utils.pdbbind_preprocess import *
from FlexPose.utils.data_utils import *
from FlexPose.model.MMFF import MMFF_keys, MMFF_pad_dim, get_MMFF_param
from tqdm.notebook import tqdm, trange
from tqdm import tqdm, trange
from modeller import Environ
from modeller.scripts import complete_pdb | 7,996 | torch.cuda.set_device(device)
def get_torsion_from_pose(pose):
bb_torsion = []
sc_torsion = []
for i in range(1, pose.size() + 1):
try:
res = pose.residue(i)
assert res.name3() in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
phi_psi = [pose.phi(i), pose.psi(i)]
chi = [c for c in res.chi()]
bb_torsion.append(phi_psi)
sc_torsion.append(chi)
except:
bb_torsion.append([None])
sc_torsion.append([None])
return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion}
def prepare_single_input(tupin):
f_name_list, idx, cache_path = tupin
p_path, l_path, ref_path = f_name_list
max_len_ligand = 150
max_len_pocket = 150
# =========== ligand encoding ===========
ligand_mol = read_rdkit_mol(l_path)
if l_path.endswith('mol2'):
ligand_template = ligand_mol
else:
mol2 = '.'.join(l_path.split('.')[:-1]) + '.mol2'
if os.path.exists(mol2):
try:
ligand_template = Chem.MolFromMol2File(mol2)
ligand_mol = AllChem.AssignBondOrdersFromTemplate(ligand_template, ligand_mol)
print(f'Found mol2 {mol2} as input.')
except:
ligand_template = ligand_mol
else:
ligand_template = ligand_mol
if ligand_mol.GetNumConformers() == 0:
AllChem.EmbedMolecule(ligand_mol, maxAttempts=10, useRandomCoords=True, clearConfs=False)
ff = Chem.rdForceFieldHelpers.MMFFGetMoleculeForceField(
ligand_mol, Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties(ligand_mol))
for atom_i in range(ligand_mol.GetNumAtoms()):
ff.MMFFAddPositionConstraint(atom_i, 1, 100) # maxDispl: maximum displacement
ff.Minimize(maxIts=20)
try:
dic_MMFF_param = get_MMFF_param(ligand_template)
except:
dic_MMFF_param = None
ligand_node_features = get_node_feature(ligand_template, 'ligand')
ligand_edge, ligand_edge_features = get_ligand_edge_feature(ligand_template)
ligand_match = get_ligand_match(ligand_template)
ligand_dismap = get_ligand_unrotable_distance(ligand_template) # not use in our model
ligand_coor_true = get_true_posi(ligand_mol)
ligand_coor_true = ligand_coor_true[get_ligand_match(ligand_mol, ligand_template)[0]]
ligand_data = [ligand_node_features, ligand_edge_features, ligand_coor_true, ligand_match, ligand_dismap]
assert len(ligand_node_features) <= max_len_ligand, 'ligand atoms need less than 150'
# =========== protein encoding ===========
# load modeller again for ray
with suppress_stdout_stderr():
env_ = Environ()
env_.libs.topology.read(file='$(LIB)/top_heav.lib')
env_.libs.parameters.read(file='$(LIB)/par.lib')
fixed_protein_path = cache_path + f'/{idx}_protein_tmp.pdb'
pdb_m = complete_pdb(env_, p_path)
pdb_m.write(fixed_protein_path)
opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
pose = pyrosetta.io.pose_from_pdb(fixed_protein_path)
dic_tor = get_torsion_from_pose(pose)
ref_mol = read_rdkit_mol(ref_path, silence=True)
ref_coor = get_true_posi(ref_mol)
biodf_protein = PandasPdb().read_pdb(fixed_protein_path)
df_protein = biodf_protein.df['ATOM']
df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str)
df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket)
SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket)
protein_data = encode_pocket(df_pocket) + [SCtorsion_data]
assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0]
assert protein_data[0].shape[0] <= max_len_pocket, 'pocket residues need less than 150'
# os.remove(fixed_protein_path)
dic_data = dict(
ligand_data=ligand_data,
protein_data=protein_data,
protein_path=fixed_protein_path,
ligand_path=l_path,
sele_res=sele_res,
dic_MMFF_param=dic_MMFF_param,
)
pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb'))
return True
def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu):
delmkdir(cache_path)
tasks = []
for idx, f_name_list in enumerate(input_list):
tasks.append((prepare_single_input, (f_name_list, idx, cache_path)))
fail = 0
if prepare_data_with_multi_cpu:
pool = Pool()
print('Preparing input data...')
| sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2]))
opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
if is_notebook():
else:
def set_device(device, num_threads: int = 16):
    """Select the compute device for subsequent torch work.

    Args:
        device: ``'cpu'`` to run on CPU, otherwise a CUDA device id/handle
            accepted by ``torch.cuda.set_device`` (e.g. ``0`` or ``'cuda:0'``).
        num_threads: intra-op CPU thread count (previously hard-coded to 16);
            only used when ``device == 'cpu'``.
    """
    if device == 'cpu':
        torch.set_num_threads(num_threads)
    else:
        torch.cuda.set_device(device)
def get_torsion_from_pose(pose):
    """Extract per-residue backbone and side-chain torsions from a PyRosetta pose.

    Args:
        pose: a PyRosetta ``Pose``; residues are indexed 1..pose.size().

    Returns:
        dict with:
            'bb_torsion': list of ``[phi, psi]`` per residue,
            'sc_torsion': list of chi-angle lists per residue.
        Residues that are missing, non-standard, or raise during lookup are
        recorded as ``[None]`` in both lists so indices stay aligned.
    """
    standard_aa = {'ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY',
                   'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER',
                   'THR', 'TRP', 'TYR', 'VAL'}
    bb_torsion = []
    sc_torsion = []
    for i in range(1, pose.size() + 1):
        try:
            res = pose.residue(i)
            # Raise (instead of the original assert, which vanishes under -O)
            # so non-standard residues always take the placeholder branch.
            if res.name3() not in standard_aa:
                raise ValueError(f'non-standard residue: {res.name3()}')
            # Compute both angle sets before appending so the two lists can
            # never get out of step if a lookup raises part-way through.
            phi_psi = [pose.phi(i), pose.psi(i)]
            chi = [c for c in res.chi()]
            bb_torsion.append(phi_psi)
            sc_torsion.append(chi)
        except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit through
            bb_torsion.append([None])
            sc_torsion.append([None])
    return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion}
def prepare_single_input(tupin):
    """Featurise one protein-ligand pair and cache the result to disk.

    Args:
        tupin: ``((protein_pdb_path, ligand_path, ref_ligand_path), idx, cache_path)``.
            The reference ligand's coordinates are used to select pocket
            residues via ``get_pocket``.

    Side effects:
        Writes ``{idx}_protein_tmp.pdb`` (MODELLER-completed protein) and
        ``{idx}.pkl`` (the feature dict) into ``cache_path``.

    Returns:
        True on success; exceptions propagate to the caller.
    """
    f_name_list, idx, cache_path = tupin
    p_path, l_path, ref_path = f_name_list
    # Hard model-input limits, enforced by the asserts below.
    max_len_ligand = 150
    max_len_pocket = 150
    # =========== ligand encoding ===========
    ligand_mol = read_rdkit_mol(l_path)
    if l_path.endswith('mol2'):
        ligand_template = ligand_mol
    else:
        # Prefer a sibling .mol2 file as a bond-order template when one exists.
        mol2 = '.'.join(l_path.split('.')[:-1]) + '.mol2'
        if os.path.exists(mol2):
            try:
                ligand_template = Chem.MolFromMol2File(mol2)
                ligand_mol = AllChem.AssignBondOrdersFromTemplate(ligand_template, ligand_mol)
                print(f'Found mol2 {mol2} as input.')
            except:
                # Template transfer failed: fall back to the molecule as read.
                ligand_template = ligand_mol
        else:
            ligand_template = ligand_mol
    if ligand_mol.GetNumConformers() == 0:
        # No 3D coordinates supplied: embed a rough conformer.
        AllChem.EmbedMolecule(ligand_mol, maxAttempts=10, useRandomCoords=True, clearConfs=False)
    # Short, position-restrained MMFF relaxation (runs whether or not we embedded).
    ff = Chem.rdForceFieldHelpers.MMFFGetMoleculeForceField(
        ligand_mol, Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties(ligand_mol))
    for atom_i in range(ligand_mol.GetNumAtoms()):
        ff.MMFFAddPositionConstraint(atom_i, 1, 100)  # maxDispl: maximum displacement
    ff.Minimize(maxIts=20)
    try:
        dic_MMFF_param = get_MMFF_param(ligand_template)
    except:
        dic_MMFF_param = None  # MMFF params are optional; None is stored as-is
    ligand_node_features = get_node_feature(ligand_template, 'ligand')
    ligand_edge, ligand_edge_features = get_ligand_edge_feature(ligand_template)
    ligand_match = get_ligand_match(ligand_template)
    ligand_dismap = get_ligand_unrotable_distance(ligand_template)  # not use in our model
    ligand_coor_true = get_true_posi(ligand_mol)
    # Reorder true coordinates into the template's atom ordering.
    ligand_coor_true = ligand_coor_true[get_ligand_match(ligand_mol, ligand_template)[0]]
    ligand_data = [ligand_node_features, ligand_edge_features, ligand_coor_true, ligand_match, ligand_dismap]
    assert len(ligand_node_features) <= max_len_ligand, 'ligand atoms need less than 150'
    # =========== protein encoding ===========
    # load modeller again for ray
    with suppress_stdout_stderr():
        env_ = Environ()
        env_.libs.topology.read(file='$(LIB)/top_heav.lib')
        env_.libs.parameters.read(file='$(LIB)/par.lib')
    fixed_protein_path = cache_path + f'/{idx}_protein_tmp.pdb'
    # Complete missing atoms/residues with MODELLER, then read torsions via PyRosetta.
    pdb_m = complete_pdb(env_, p_path)
    pdb_m.write(fixed_protein_path)
    opts = '-mute true -ignore_unrecognized_res true'
    pyrosetta.distributed.init(opts)
    pose = pyrosetta.io.pose_from_pdb(fixed_protein_path)
    dic_tor = get_torsion_from_pose(pose)
    # Reference ligand coordinates locate the binding pocket.
    ref_mol = read_rdkit_mol(ref_path, silence=True)
    ref_coor = get_true_posi(ref_mol)
    biodf_protein = PandasPdb().read_pdb(fixed_protein_path)
    df_protein = biodf_protein.df['ATOM']
    df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str)
    df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket)
    SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket)
    protein_data = encode_pocket(df_pocket) + [SCtorsion_data]
    assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0]
    assert protein_data[0].shape[0] <= max_len_pocket, 'pocket residues need less than 150'
    # os.remove(fixed_protein_path)
    dic_data = dict(
        ligand_data=ligand_data,
        protein_data=protein_data,
        protein_path=fixed_protein_path,
        ligand_path=l_path,
        sele_res=sele_res,
        dic_MMFF_param=dic_MMFF_param,
    )
    pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb'))
    return True
def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu):
delmkdir(cache_path)
tasks = []
for idx, f_name_list in enumerate(input_list):
tasks.append((prepare_single_input, (f_name_list, idx, cache_path)))
fail = 0
if prepare_data_with_multi_cpu:
pool = Pool()
print('Preparing input data...') | for r in pool.map(try_prepare_task, tasks): | 1 | 2023-10-19 22:03:51+00:00 | 12k |
openvpi/SingingVocoders | training/univnet.py | [
{
"identifier": "UnivNet",
"path": "models/univnet/univnet.py",
"snippet": "class UnivNet(torch.nn.Module):\n \"\"\"Parallel WaveGAN Generator module.\"\"\"\n\n def __init__(self, h, use_weight_norm=True):\n\n super().__init__()\n\n in_channels = h['model_args']['cond_in_channels']\n... | import logging
import os
import pathlib
import random
import sys
import lightning.pytorch as pl
import matplotlib
import numpy as np
import torch.utils.data
import utils
from typing import Dict
from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import Dataset
from torchmetrics import Metric, MeanMetric
from models.univnet.univnet import UnivNet
from modules.loss.univloss import univloss
from modules.univ_D.discriminator import MultiPeriodDiscriminator, MultiResSpecDiscriminator
from training.base_task_gan import GanBaseTask
from utils.wav2mel import PitchAdjustableMelSpectrogram | 10,294 |
def __len__(self):
return len(self.data_index)
def collater(self, minibatch):
samples_per_frame = self.config['hop_size']
if self.infer:
crop_mel_frames = 0
else:
crop_mel_frames = self.config['crop_mel_frames']
for record in minibatch:
# Filter out records that aren't long enough.
if len(record['spectrogram']) < crop_mel_frames:
del record['spectrogram']
del record['audio']
del record['f0']
continue
start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames)
end = start + crop_mel_frames
if self.infer:
record['spectrogram'] = record['spectrogram'].T
record['f0'] = record['f0']
else:
record['spectrogram'] = record['spectrogram'][start:end].T
record['f0'] = record['f0'][start:end]
start *= samples_per_frame
end *= samples_per_frame
if self.infer:
cty=(len(record['spectrogram'].T) * samples_per_frame)
record['audio'] = record['audio'][:cty]
record['audio'] = np.pad(record['audio'], (
0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])),
mode='constant')
pass
else:
# record['spectrogram'] = record['spectrogram'][start:end].T
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])),
mode='constant')
if self.volume_aug:
for record in minibatch:
if random.random() < self.volume_aug_prob:
audio = record['audio']
audio_mel = record['spectrogram']
max_amp = float(np.max(np.abs(audio))) + 1e-5
max_shift = min(3, np.log(1 / max_amp))
log_mel_shift = random.uniform(-3, max_shift)
# audio *= (10 ** log_mel_shift)
audio *= np.exp(log_mel_shift)
audio_mel += log_mel_shift
audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy()
record['audio'] = audio
record['spectrogram'] = audio_mel
audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record])
return {
'audio': torch.from_numpy(audio).unsqueeze(1),
'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),
}
class stftlog:
def __init__(self,
n_fft=2048,
win_length=2048,
hop_length=512,
center=False,):
self.hop_length=hop_length
self.win_size=win_length
self.n_fft = n_fft
self.win_size = win_length
self.center = center
self.hann_window = {}
def exc(self,y):
hann_window_key = f"{y.device}"
if hann_window_key not in self.hann_window:
self.hann_window[hann_window_key] = torch.hann_window(
self.win_size, device=y.device
)
y = torch.nn.functional.pad(
y.unsqueeze(1),
(
int((self.win_size - self.hop_length) // 2),
int((self.win_size - self.hop_length+1) // 2),
),
mode="reflect",
)
y = y.squeeze(1)
spec = torch.stft(
y,
self.n_fft,
hop_length=self.hop_length,
win_length=self.win_size,
window=self.hann_window[hann_window_key],
center=self.center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
).abs()
return spec
class univnet_task(GanBaseTask):
def __init__(self, config):
super().__init__(config)
|
# from models.lvc_ddspgan.lvc_ddspgan import DDSPgan
# from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator
def spec_to_figure(spec, vmin=None, vmax=None):
    """Render a spectrogram (frames x bins) as a pseudocolour matplotlib figure.

    Accepts a torch tensor (moved to CPU first) or a numpy array; the array is
    transposed before plotting.
    """
    data = spec.cpu().numpy() if isinstance(spec, torch.Tensor) else spec
    figure = plt.figure(figsize=(12, 9), dpi=100)
    plt.pcolor(data.T, vmin=vmin, vmax=vmax)
    plt.tight_layout()
    return figure
class nsf_HiFigan_dataset(Dataset):
    """Dataset of pre-extracted (mel, f0, audio) ``.npz`` files for vocoder training.

    ``data_dir`` is a text file listing one npz path per line; each npz must
    contain ``'mel'`` (frames, n_mels), ``'f0'`` (frames,) and ``'audio'``
    (samples,). ``collater`` crops training examples to ``crop_mel_frames``
    and optionally applies random volume augmentation.
    """
    def __init__(self, config: dict, data_dir, infer=False):
        super().__init__()
        self.config = config
        self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir)
        with open(self.data_dir, 'r', encoding='utf8') as f:
            fills = f.read().strip().split('\n')
        self.data_index = fills
        self.infer = infer
        self.volume_aug = self.config['volume_aug']
        # Never augment at inference time.
        self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0
    def __getitem__(self, index):
        data_path = self.data_index[index]
        data = np.load(data_path)
        return {'f0': data['f0'], 'spectrogram': data['mel'], 'audio': data['audio']}
    def __len__(self):
        return len(self.data_index)
    def collater(self, minibatch):
        """Collate a list of record dicts into batched tensors.

        Training: each record is cropped to ``crop_mel_frames`` mel frames plus
        the matching audio window (``hop_size`` samples per frame); records
        shorter than the crop are dropped. Inference: full length, with audio
        trimmed/padded to a whole number of frames.
        """
        samples_per_frame = self.config['hop_size']
        if self.infer:
            crop_mel_frames = 0
        else:
            crop_mel_frames = self.config['crop_mel_frames']
        for record in minibatch:
            # Filter out records that aren't long enough.
            if len(record['spectrogram']) < crop_mel_frames:
                del record['spectrogram']
                del record['audio']
                del record['f0']
                continue
            # max(0, ...) guards records with exactly crop_mel_frames frames,
            # which previously crashed random.randint with an empty range.
            start = random.randint(0, max(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames))
            end = start + crop_mel_frames
            if self.infer:
                record['spectrogram'] = record['spectrogram'].T
                record['f0'] = record['f0']
            else:
                record['spectrogram'] = record['spectrogram'][start:end].T
                record['f0'] = record['f0'][start:end]
            # Convert frame indices to sample indices.
            start *= samples_per_frame
            end *= samples_per_frame
            if self.infer:
                # Trim, then zero-pad audio to exactly frames * hop samples.
                cty = (len(record['spectrogram'].T) * samples_per_frame)
                record['audio'] = record['audio'][:cty]
                record['audio'] = np.pad(
                    record['audio'],
                    (0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])),
                    mode='constant')
            else:
                record['audio'] = record['audio'][start:end]
                record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])),
                                         mode='constant')
        if self.volume_aug:
            for record in minibatch:
                # Skip records dropped by the length filter above — their keys
                # were deleted, so indexing them raised KeyError previously.
                if 'audio' not in record:
                    continue
                if random.random() < self.volume_aug_prob:
                    audio = record['audio']
                    audio_mel = record['spectrogram']
                    max_amp = float(np.max(np.abs(audio))) + 1e-5
                    # Cap the gain so the scaled audio stays within headroom.
                    max_shift = min(3, np.log(1 / max_amp))
                    log_mel_shift = random.uniform(-3, max_shift)
                    audio *= np.exp(log_mel_shift)
                    # A gain is additive in the log-mel domain; clamp to the log(1e-5) floor.
                    audio_mel += log_mel_shift
                    audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy()
                    record['audio'] = audio
                    record['spectrogram'] = audio_mel
        audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
        spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
        f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record])
        return {
            'audio': torch.from_numpy(audio).unsqueeze(1),
            'mel': torch.from_numpy(spectrogram),
            'f0': torch.from_numpy(f0),
        }
class stftlog:
    """Magnitude-STFT helper that caches one Hann window per torch device."""
    def __init__(self,
                 n_fft=2048,
                 win_length=2048,
                 hop_length=512,
                 center=False,):
        self.n_fft = n_fft
        self.win_size = win_length
        self.hop_length = hop_length
        self.center = center
        # Hann windows keyed by device string, created lazily in exc().
        self.hann_window = {}
    def exc(self, y):
        """Return the magnitude STFT of a waveform batch ``y`` of shape (B, T)."""
        device_key = f"{y.device}"
        window = self.hann_window.get(device_key)
        if window is None:
            window = torch.hann_window(self.win_size, device=y.device)
            self.hann_window[device_key] = window
        # Reflect-pad by (win - hop) samples in total, split (floor, ceil)
        # between the two ends; pad() needs a 3D input for 1D padding.
        pad_left = int((self.win_size - self.hop_length) // 2)
        pad_right = int((self.win_size - self.hop_length + 1) // 2)
        y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode="reflect")
        y = y.squeeze(1)
        spec = torch.stft(
            y,
            self.n_fft,
            hop_length=self.hop_length,
            win_length=self.win_size,
            window=window,
            center=self.center,
            pad_mode="reflect",
            normalized=False,
            onesided=True,
            return_complex=True,
        )
        return spec.abs()
class univnet_task(GanBaseTask):
def __init__(self, config):
super().__init__(config) | self.TF = PitchAdjustableMelSpectrogram( f_min=0, | 5 | 2023-10-17 13:45:09+00:00 | 12k |
OllieBoyne/FOUND | FOUND/utils/eval_utils.py | [
{
"identifier": "modified_chamf",
"path": "FOUND/utils/pytorch3d.py",
"snippet": "def modified_chamf(x,y, x_lengths=None, y_lengths=None,\n x_normals=None, y_normals=None,\n norm: int = 2):\n \"\"\"\n \tA modified version of pytorch3d.loss.chamfer_distance\n \tto allow for no point or batch... | from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes
from multiprocessing import Process
from prettytable import PrettyTable
from .pytorch3d import modified_chamf, modified_sample
from .renderer import Renderer, view_from
from .vis import produce_grid, put_text, colourbar
from matplotlib import pyplot as plt
import os
import trimesh
import cv2
import multiprocessing as mp
import torch
import torch.nn.functional as F
import numpy as np
import json | 8,026 |
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing
pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface))
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# define a mask
# want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse
# (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary)
# recalculate sole vertices
FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0]
FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole
FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole
else:
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# Convert to PyTorch3D
p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(),
faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device)
gt_mesh = p3d_from_trimesh(gt_mesh_trimesh)
pred_mesh = p3d_from_trimesh(pred_mesh_trimesh)
# Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs
gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True)
pred_sample_dict = modified_sample(pred_mesh, num_samples=10_000, return_normals=True)
# Calculate errors for reporting - by considering samples over the surface
errs = modified_chamf(pred_sample_dict['verts'], gt_sample_dict['verts'],
x_normals=pred_sample_dict['normals'], y_normals=gt_sample_dict['normals'])
# Calculate errors for visualisation - by considering every vertex
vis_errs = modified_chamf(pred_mesh.verts_padded(), gt_mesh.verts_padded(),
x_normals=pred_mesh.verts_normals_padded(), y_normals=gt_mesh.verts_normals_padded())
# convert from cosine similarity to error in degrees
errs['cham_norm_x'] = torch.rad2deg(torch.acos(errs['cham_norm_x']))
errs['cham_norm_y'] = torch.rad2deg(torch.acos(errs['cham_norm_y']))
vis_errs['cham_norm_x'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_x']))
vis_errs['cham_norm_y'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_y']))
if settings.get('model', 'FIND') == 'FIND':
# apply masking here to not include errors for sole in pred -> real
# errs has a sample of the vertices in, need to do correct indexing
sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]]
errs['cham_x'][:, sampled_vertex_mask] = np.nan
errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan
# vis_errs has all vertices in mesh in
vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan
vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan
# visualisation info for each metric of error
vis_params = {
'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'),
'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin')
}
# define axes
fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col')
axs[0, 0].set_title('Chamfer Error')
axs[0, 1].set_title('Normal Error')
axs[0, 0].set_ylabel('pred2real')
axs[1, 0].set_ylabel('real2pred')
axs[1, 0].set_xlabel('um')
axs[1, 1].set_xlabel('Degrees')
axs[1,1].set_xlim(0, 90)
axs[1, 1].set_yticks([0, 30, 60, 90])
with Reporter(os.path.join(eval_dir, 'report.txt')) as report:
report(f"Experiment: {exp_dir}")
i = 0
for L in ['cham', 'cham_norm']:
report(L)
table = PrettyTable()
cutoffs = vis_params[L]['cutoffs']
mag = vis_params[L]['mag']
table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs]
for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']):
e = errs[f'{L}_{x}'].cpu().detach().numpy()
e = e[~np.isnan(e)] # filter out nan values
metrics = eval_metrics(e, cutoffs=cutoffs)
table.add_row([desc] +
[f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] +
[f'{i * 100:.1f}' for i in metrics['cutoffs']]
)
# plot distribution of errors
ax = axs[i%2, i//2]
if vis_params[L]['xscale'] == 'log':
ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True)
ax.set_xscale('log')
else:
ax.hist(np.ravel(e) * mag, bins=100, density=True)
i+=1
results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']},
**{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}}
report(table.get_string())
report("")
plt.savefig(os.path.join(eval_dir, 'err_dist.png'))
plt.close()
# Set up rendering
if render:
renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
| """Evaluate the performance of a fitted mesh"""
device = 'cuda'
def eval_metrics(arr, cutoffs=[5, 7.5, 11.25, 22.5, 30]):
    """Summarise a 1D array of errors.

    Returns a dict with the mean, median, RMSE, and — under 'cutoffs' — the
    fraction of values strictly below each threshold in ``cutoffs`` (same order).
    """
    assert arr.ndim == 1, "eval_metrics requires 1D array"
    below_fracs = [(arr < threshold).mean() for threshold in cutoffs]
    return dict(
        mean=arr.mean(),
        median=np.median(arr),
        rmse=np.sqrt((arr ** 2).mean()),
        cutoffs=below_fracs,
    )
def err_to_colour(err: torch.Tensor, vmin:float=None, vmax:float=None, colmin=(0, 1, 0), colmax=(1, 0, 0), nan_colour=(0.3, 0.3, 0.3)):
"""Convert a tensor of errors (...) to an RGB colour scale (..., 3).
Linearly interpolate so that err of vmin -> colmin, err of vmax -> colmax
if vmin and vmax not given, take min and max of err
If any nan's given, set their colour to nan_colour
"""
ndim = err.ndim
colmin = torch.tensor(colmin)[(None,)*ndim].to(err.device) # expand colmin to [..., 3]
colmax = torch.tensor(colmax)[(None,)*ndim].to(err.device)
colnan = torch.tensor(nan_colour)[(None,)*ndim].to(err.device)
vmin = err.nanmin() if vmin is None else vmin
vmax = err.nanmax() if vmax is None else vmax
fracs = (err - vmin) / (vmax - vmin)
rgba = (colmin + fracs.unsqueeze(-1) * (colmax - colmin)).to(err.device)
rgba = torch.clip(rgba, min=0, max=1)
rgba[torch.any(torch.isnan(rgba), dim=-1)] = colnan
return rgba
class Reporter:
    """Collects report lines; on context exit, prints them and writes them to a file."""
    def __init__(self, out_file_loc):
        self.out_file_loc = out_file_loc
        self.lines = []
    def __call__(self, line):
        # Buffer the line; nothing is emitted until the context exits.
        self.lines.append(line)
    def __enter__(self, *args):
        return self
    def __exit__(self, *args):
        for line in self.lines:
            print(line)
        with open(self.out_file_loc, 'w') as outfile:
            outfile.writelines(line + '\n' for line in self.lines)
def get_max_fit(exp_dir):
    """Return the filename of the highest-numbered ``fit_<N>.obj`` in *exp_dir*."""
    def fit_number(fname):
        # Non-fit files rank below every fit (fit numbers start at 0).
        if 'fit_' not in fname:
            return -1
        return int(fname.split('fit_')[1].split('.obj')[0])
    return max(os.listdir(exp_dir), key=fit_number)
def cutoff_slice_FIND(mesh, max_heel_height=0.04, cutoff_height=0.1):
    """Slice the top off a foot mesh, FIND-style.

    The heel keypoint is the vertex with minimum X among vertices whose Z lies
    below ``max_heel_height``; the mesh is cut ``cutoff_height`` above it
    (clamped to just below the mesh top) with a downward-facing plane.
    """
    verts = mesh.vertices
    X, Z = verts.T[0], verts.T[2]
    # Mask out vertices at/above max_heel_height when searching for the heel.
    masked_x = np.ma.masked_array(X, mask=Z >= max_heel_height)
    heel_idx = np.ma.argmin(masked_x)
    slice_height = min(Z[heel_idx] + cutoff_height, Z.max() - 5e-3)
    return mesh.slice_plane([0, 0, slice_height], [0, 0, -1], cap=False)
def get_loghist(x, nbins):
    """Return kwargs for ``plt.hist`` giving *nbins* logarithmically spaced bins.

    The bin edges span the same range as the linear ``np.histogram`` edges but
    are spaced uniformly in log10. ``x`` must be strictly positive (log10 of
    zero/negative values would yield -inf/NaN edges).

    Uses ``np.histogram_bin_edges`` so only the edges are computed — the
    original call to ``np.histogram`` discarded the counts.
    """
    edges = np.histogram_bin_edges(x, bins=nbins)
    logbins = np.logspace(np.log10(edges[0]), np.log10(edges[-1]), len(edges))
    return dict(x=x, bins=logbins)
def eval_exp(exp_dir, render=True):
results = {} # return results as errors
if not any('fit_' in f for f in os.listdir(exp_dir)):
print(f"No fits for {exp_dir}, skipping...")
return
pred_obj_loc = os.path.join(exp_dir, get_max_fit(exp_dir))
# load settings to get folder
opts_loc = os.path.join(exp_dir, 'opts.json')
if not os.path.isfile(opts_loc):
print(f"No opts for {exp_dir}, skipping...")
return
with open(opts_loc) as infile:
settings = json.load(infile)
# assume GT OBJ loc is
# (1) saved in <data_folder>/mesh.obj if <data_folder> given
if 'data_folder' in settings:
gt_obj_loc = os.path.join(settings['data_folder'], 'mesh.obj')
# (2) saved in <exp_dir>/gt_mesh.obj otherwise
else:
gt_obj_loc = os.path.join(exp_dir, 'gt_mesh.obj')
eval_dir = os.path.join(exp_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)
with open(gt_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
gt_mesh_trimesh = trimesh.Trimesh(**d)
with open(pred_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
pred_mesh_trimesh = trimesh.Trimesh(**d)
# pre-process meshes, w/ cutoff
# Same method as used for Foot3D here for slicing GT
gt_mesh_trimesh = cutoff_slice_FIND(gt_mesh_trimesh)
if settings.get('model', 'FIND') == 'FIND':
# slice FIND faces
FIND_cutoff_surface = np.load(os.path.join(settings['find_pth'], 'templ_masked_faces.npy'))
FIND_sole_faces = np.load(os.path.join(settings['find_pth'], 'templ_sole_faces.npy'))
FIND_sole_verts = np.unique(np.ravel(pred_mesh_trimesh.faces[FIND_sole_faces])) # all vertices considered part of the sole
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing
pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface))
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# define a mask
# want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse
# (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary)
# recalculate sole vertices
FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0]
FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole
FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole
else:
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# Convert to PyTorch3D
p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(),
faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device)
gt_mesh = p3d_from_trimesh(gt_mesh_trimesh)
pred_mesh = p3d_from_trimesh(pred_mesh_trimesh)
# Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs
gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True)
pred_sample_dict = modified_sample(pred_mesh, num_samples=10_000, return_normals=True)
# Calculate errors for reporting - by considering samples over the surface
errs = modified_chamf(pred_sample_dict['verts'], gt_sample_dict['verts'],
x_normals=pred_sample_dict['normals'], y_normals=gt_sample_dict['normals'])
# Calculate errors for visualisation - by considering every vertex
vis_errs = modified_chamf(pred_mesh.verts_padded(), gt_mesh.verts_padded(),
x_normals=pred_mesh.verts_normals_padded(), y_normals=gt_mesh.verts_normals_padded())
# convert from cosine similarity to error in degrees
errs['cham_norm_x'] = torch.rad2deg(torch.acos(errs['cham_norm_x']))
errs['cham_norm_y'] = torch.rad2deg(torch.acos(errs['cham_norm_y']))
vis_errs['cham_norm_x'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_x']))
vis_errs['cham_norm_y'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_y']))
if settings.get('model', 'FIND') == 'FIND':
# apply masking here to not include errors for sole in pred -> real
# errs has a sample of the vertices in, need to do correct indexing
sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]]
errs['cham_x'][:, sampled_vertex_mask] = np.nan
errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan
# vis_errs has all vertices in mesh in
vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan
vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan
# visualisation info for each metric of error
vis_params = {
'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'),
'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin')
}
# define axes
fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col')
axs[0, 0].set_title('Chamfer Error')
axs[0, 1].set_title('Normal Error')
axs[0, 0].set_ylabel('pred2real')
axs[1, 0].set_ylabel('real2pred')
axs[1, 0].set_xlabel('um')
axs[1, 1].set_xlabel('Degrees')
axs[1,1].set_xlim(0, 90)
axs[1, 1].set_yticks([0, 30, 60, 90])
with Reporter(os.path.join(eval_dir, 'report.txt')) as report:
report(f"Experiment: {exp_dir}")
i = 0
for L in ['cham', 'cham_norm']:
report(L)
table = PrettyTable()
cutoffs = vis_params[L]['cutoffs']
mag = vis_params[L]['mag']
table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs]
for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']):
e = errs[f'{L}_{x}'].cpu().detach().numpy()
e = e[~np.isnan(e)] # filter out nan values
metrics = eval_metrics(e, cutoffs=cutoffs)
table.add_row([desc] +
[f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] +
[f'{i * 100:.1f}' for i in metrics['cutoffs']]
)
# plot distribution of errors
ax = axs[i%2, i//2]
if vis_params[L]['xscale'] == 'log':
ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True)
ax.set_xscale('log')
else:
ax.hist(np.ravel(e) * mag, bins=100, density=True)
i+=1
results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']},
**{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}}
report(table.get_string())
report("")
plt.savefig(os.path.join(eval_dir, 'err_dist.png'))
plt.close()
# Set up rendering
if render:
renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device) | R, T = view_from(['side1', 'topdown', 'side2']) | 3 | 2023-10-24 11:46:42+00:00 | 12k |
RobertCsordas/moe | tasks/simple/language_model/enwik8_transformer.py | [
{
"identifier": "TransformerLanguageModel",
"path": "models/transformer_language_model.py",
"snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ... | import framework
import torch
import torch.nn
import torch.utils.data
import dataset
import random
from models import TransformerLanguageModel
from ... import task, args
from .transformer_lm_mixin import TransformerLMMixin
from ..simple_task import SimpleTask
from typing import Tuple, Any, Dict, List, Union
from interfaces import LanguageModelInterface | 9,238 |
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-lm.state_drop_probability", default=0.0)
parser.add_argument("-lm.lstm_weight_drop", default=0.0)
parser.add_argument("-lm.unroll", default=100)
parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
parser.add_argument("-lm.example_context", default=100)
parser.add_argument("-lm.example_window", default=40)
|
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-lm.state_drop_probability", default=0.0)
parser.add_argument("-lm.lstm_weight_drop", default=0.0)
parser.add_argument("-lm.unroll", default=100)
parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
parser.add_argument("-lm.example_context", default=100)
parser.add_argument("-lm.example_window", default=40)
| @task() | 1 | 2023-10-16 11:26:45+00:00 | 12k |
blackgold3/SemanticBoost | mdm/model_util.py | [
{
"identifier": "MDM",
"path": "mdm/model/mdm.py",
"snippet": "class MDM(nn.Module):\n def __init__(self, njoints, nfeats, latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n activation=\"gelu\", dataset='amass', clip_dim=512,\n arch='trans_enc', cl... | from mdm.model.mdm import MDM
from mdm.diffusion import gaussian_diffusion as gd
from mdm.diffusion.respace import SpacedDiffusion, space_timesteps, InpaintingGaussianDiffusion
from mdm.model.trt_model import TRT_MDM | 7,589 |
def load_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
try:
new_state_dict = {}
for key, value in state_dict.items():
if "in_proj" in key:
keyq = key.replace("in_proj_weight", "wq.weight")
keyk = key.replace("in_proj_weight", "wk.weight")
keyv = key.replace("in_proj_weight", "wv.weight")
inshape = value.shape[0] // 3
valueq = value[:inshape]
valuek = value[inshape:inshape * 2]
valuev = value[inshape * 2:]
new_state_dict[keyq] = valueq
new_state_dict[keyk] = valuek
new_state_dict[keyv] = valuev
elif "out_proj" in key:
newkey = key.replace("out_proj", "wo")
new_state_dict[newkey] = value
else:
new_state_dict[key] = value
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
except:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
other_miss = []
for key in missing_keys:
if not key.startswith('clip_model.'):
other_miss.append(key)
print(other_miss)
assert all([k.startswith('clip_model.') for k in missing_keys])
def load_ft_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
# for name, value in model.named_parameters():
# if "seqTransEncoder" in name and "self_attn" in name:
# value.requires_grad = False
# if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
# value.requires_grad = False
assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
# assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])
def create_model_and_diffusion(args, mode="text", json_dict=None):
model = MDM(**get_model_args(args), json_dict=json_dict)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
model = TRT_MDM(model, json_dict, device=device)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def get_model_args(args):
# default args
clip_version = 'ViT-B/32'
if args.unconstrained:
cond_mode = 'no_cond'
elif args.dataset in ['kit', 'humanml']:
cond_mode = "text"
activation = args.trans_activate if args.arch != "trans_enc" else "gelu"
if args.dataset == 'humanml':
njoints = 263
nfeats = 1
elif args.dataset == 'kit':
njoints = 251
nfeats = 1
if args.rep == "smr":
njoints += 6
nfeats = 1
return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size, 'num_layers': args.layers, 'num_heads': args.heads,
'dropout': 0.1, 'activation': activation, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
'clip_version': clip_version, 'dataset': args.dataset, "local":args.local, "encode_full":args.encode_full, "txt_tokens":args.txt_tokens,
"dataset_path":args.dataset_path, "num_frames":args.num_frames, "conv_bias":args.conv_bias, "conv_activate":args.conv_activate,
"conv_norm":args.conv_norm}
def create_gaussian_diffusion(args, mode="text"):
# default params
predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal!
steps = 1000
scale_beta = 1. # no scaling
timestep_respacing = '' # can be used for ddim sampling, we don't use it.
learn_sigma = False
rescale_timesteps = False
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
diffusion = InpaintingGaussianDiffusion
else:
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SpacedDiffusion")
|
def load_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
try:
new_state_dict = {}
for key, value in state_dict.items():
if "in_proj" in key:
keyq = key.replace("in_proj_weight", "wq.weight")
keyk = key.replace("in_proj_weight", "wk.weight")
keyv = key.replace("in_proj_weight", "wv.weight")
inshape = value.shape[0] // 3
valueq = value[:inshape]
valuek = value[inshape:inshape * 2]
valuev = value[inshape * 2:]
new_state_dict[keyq] = valueq
new_state_dict[keyk] = valuek
new_state_dict[keyv] = valuev
elif "out_proj" in key:
newkey = key.replace("out_proj", "wo")
new_state_dict[newkey] = value
else:
new_state_dict[key] = value
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
except:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
other_miss = []
for key in missing_keys:
if not key.startswith('clip_model.'):
other_miss.append(key)
print(other_miss)
assert all([k.startswith('clip_model.') for k in missing_keys])
def load_ft_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
# for name, value in model.named_parameters():
# if "seqTransEncoder" in name and "self_attn" in name:
# value.requires_grad = False
# if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
# value.requires_grad = False
assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
# assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])
def create_model_and_diffusion(args, mode="text", json_dict=None):
model = MDM(**get_model_args(args), json_dict=json_dict)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
model = TRT_MDM(model, json_dict, device=device)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def get_model_args(args):
# default args
clip_version = 'ViT-B/32'
if args.unconstrained:
cond_mode = 'no_cond'
elif args.dataset in ['kit', 'humanml']:
cond_mode = "text"
activation = args.trans_activate if args.arch != "trans_enc" else "gelu"
if args.dataset == 'humanml':
njoints = 263
nfeats = 1
elif args.dataset == 'kit':
njoints = 251
nfeats = 1
if args.rep == "smr":
njoints += 6
nfeats = 1
return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size, 'num_layers': args.layers, 'num_heads': args.heads,
'dropout': 0.1, 'activation': activation, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
'clip_version': clip_version, 'dataset': args.dataset, "local":args.local, "encode_full":args.encode_full, "txt_tokens":args.txt_tokens,
"dataset_path":args.dataset_path, "num_frames":args.num_frames, "conv_bias":args.conv_bias, "conv_activate":args.conv_activate,
"conv_norm":args.conv_norm}
def create_gaussian_diffusion(args, mode="text"):
# default params
predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal!
steps = 1000
scale_beta = 1. # no scaling
timestep_respacing = '' # can be used for ddim sampling, we don't use it.
learn_sigma = False
rescale_timesteps = False
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
diffusion = InpaintingGaussianDiffusion
else:
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SpacedDiffusion") | diffusion = SpacedDiffusion | 2 | 2023-10-20 14:53:26+00:00 | 12k |
pythonlessons/FinRock | experiments/testing_ppo_sinusoid.py | [
{
"identifier": "PdDataFeeder",
"path": "finrock/data_feeder.py",
"snippet": "class PdDataFeeder:\n def __init__(\n self, \n df: pd.DataFrame,\n indicators: list = [],\n min: float = None,\n max: float = None\n ) -> None:\n self... | import numpy as np
import pandas as pd
import tensorflow as tf
from finrock.data_feeder import PdDataFeeder
from finrock.trading_env import TradingEnv
from finrock.render import PygameRender
from finrock.scalers import MinMaxScaler
from finrock.reward import simpleReward
from finrock.metrics import DifferentActions, AccountValue, MaxDrawdown, SharpeRatio
from finrock.indicators import BolingerBands, RSI, PSAR, SMA | 9,058 | tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
PSAR(data=df),
SMA(data=df, period=7),
SMA(data=df, period=25),
SMA(data=df, period=99),
]
)
env = TradingEnv(
data_feeder = pd_data_feeder,
output_transformer = MinMaxScaler(min=pd_data_feeder.min, max=pd_data_feeder.max),
initial_balance = 1000.0,
max_episode_steps = 1000,
window_size = 50,
| tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
PSAR(data=df),
SMA(data=df, period=7),
SMA(data=df, period=25),
SMA(data=df, period=99),
]
)
env = TradingEnv(
data_feeder = pd_data_feeder,
output_transformer = MinMaxScaler(min=pd_data_feeder.min, max=pd_data_feeder.max),
initial_balance = 1000.0,
max_episode_steps = 1000,
window_size = 50, | reward_function = simpleReward, | 4 | 2023-10-23 07:44:54+00:00 | 12k |
hitlic/deepepochs | deepepochs/trainer.py | [
{
"identifier": "StopLoopException",
"path": "deepepochs/loops.py",
"snippet": "class StopLoopException(Exception):\r\n pass\r"
},
{
"identifier": "LoopException",
"path": "deepepochs/loops.py",
"snippet": "class LoopException(Exception):\r\n pass\r"
},
{
"identifier": "Ten... | import math
import time
import torch
from datetime import datetime
from collections import defaultdict
from typing import List, Dict, Callable
from torch.optim import Adam
from torch.utils.data import DataLoader
from accelerate import Accelerator
from .loops import (StopLoopException, LoopException, TensorTuple,
flatten_dict, default_loss, concat_dicts, to_numpy, listify, batch_size, concat, detach_clone)
from .tools import batches
from .optimizer import Optimizer, Optimizers
from .patches import PatchBase, MeanPatch, TensorPatch, run_patch_dict, run_patch_dicts
from .callbacks import CallbackPool, DefaultCallback, CallbackException
from tqdm import tqdm
| 7,749 | patch_dict = step_out
else:
patch_dict = {}
self.batch_patch_dict.update(patch_dict)
epoch_patch_dicts.append(self.batch_patch_dict)
# 计算当前batch的指标
batch_metric_values = flatten_dict(run_patch_dict(self.batch_patch_dict), sep='')
self.callbacks.trigger(f'after_{self.stage}_batch', trainer=self.trainer, metrics=batch_metric_values, batch_idx=batch_idx)
# 清空 self.batch_patch_dict
self.batch_patch_dict = {}
# 计算当前epoch的指标
epoch_metrics_values = flatten_dict(run_patch_dicts(epoch_patch_dicts), sep='')
self.callbacks.trigger(f'after_{self.stage}_epoch', trainer=self.trainer, task=self, metrics=epoch_metrics_values)
return epoch_metrics_values
class ModelWrapper:
"""
用于实现回调:
on_before_train_forward on_after_train_forward
on_before_val_forward on_after_val_forward
on_before_test_forward on_after_test_forward
"""
def __init__(self, model, trainer):
# self.model = torch.compile(model)
self.model = model
self.trainer = trainer
self.stage = None
def __getattr__(self, name):
return getattr(self.model, name)
def __call__(self, *args, **kwds):
self.trainer.callbacks.trigger(f'before_{self.stage}_forward', trainer=self)
model_out = self.model(*args, **kwds)
self.trainer.callbacks.trigger(f'after_{self.stage}_forward', trainer=self, model_out=model_out)
return model_out
def train(self):
self.model.train()
def eval(self):
self.model.eval()
def to(self, device):
self.model = self.model.to(device)
return self
def cpu(self):
self.model = self.model.cpu()
return self
def cuda(self):
self.model = self.model.cuda()
return self
def parameters(self):
return self.model.parameters()
def modules(self):
return self.model.modules()
def state_dict(self):
return self.model.state_dict()
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict)
class LossWrapper:
"""
1. 自动完成zero_grad、backward、opt.step等操作
2. 配合实现梯度累积
3. 实现回调
on_before_backward on_after_backward
on_before_optimize on_after_optimize
on_train_metrics on_val_metrics on_test_metrics
"""
def __init__(self, loss_fn, trainer):
self.loss_fn = loss_fn
self.trainer = trainer
self.stage = None
self.do_loss = None
self.task = None
self.total_loss = 0 # 用于实现累积梯度
self.model_outs = [] # 用于实现累积梯度
self.batch_ys = [] # 用于实现累积梯度
def optimize(self):
self.trainer.callbacks.trigger('before_optimize', trainer=self)
self.trainer.opt.step()
self.trainer.opt.zero_grad()
self.trainer.callbacks.trigger('after_optimize', trainer=self)
def __call__(self, model_out, batch_y, grad_accumulate=False):
"""
Args:
model_out: 模型预测输出
batch_y: 标签
grad_accumulate: 是否累积梯度
"""
if self.stage == 'train':
# 计算损失
loss = self.loss_fn(model_out, batch_y)
# backward
self.trainer.callbacks.trigger('before_backward', trainer=self, loss=loss)
if self.trainer.accelerator is None:
(loss/self.trainer.grad_accumulate_steps).backward()
else: # accelerate的backward
self.trainer.accelerator.backward(loss/self.trainer.grad_accumulate_steps)
self.trainer.callbacks.trigger('after_backward', trainer=self, loss=loss)
# 记录各sub-batch的总损失、模型输出、标签
_loss = loss.detach().clone()
self.total_loss += _loss * batch_size(model_out)
| """
@author: liuchen
"""
class EpochTask:
"""一个Epoch的训练、验证或测试任务"""
def __init__(self, dataloader, metrics=None, do_loss=True, **step_args):
"""
Args:
dataloader: pytorch Dataloader
metrics: 指标函数列表
do_loss: 验证和测试中是否计算据损失
step_args: 其他需要传递给`step`、`train_step`、`val_step`、`test_step`和`evaluate`方法的参数
"""
self.dataloader = dataloader
self.batchs = len(dataloader)
self.metrics = listify(metrics)
self.do_loss = do_loss
self.trainer = None
self.stage = None
self.val_freq = None
self.step_args = step_args
self.batch_patch_dict = {} # 由DefaultCallback中的on_train/val/test_prediction回调注入
def __len__(self):
return self.batchs
def __getattr__(self, name):
"""如果要找的属性和方法不存在,则到trainer中找"""
return getattr(self.trainer, name, None)
def __call__(self):
phase = 'train' if self.stage=='train' else 'evaluate'
if self.stage == 'train':
self.model.train()
else:
self.model.eval()
self.model.stage = self.stage
self.loss.stage = self.stage
self.loss.do_loss = self.do_loss
self.loss.task = self
# 配置指标,在DefaultCallback中的on_train/val/test_prediction中用于构造Patch
if self.stage == 'train':
self.metrics = [m for m in self.metrics if m not in self.train_metrics] + self.train_metrics
elif self.stage == 'val':
self.metrics = [m for m in self.metrics if m not in self.val_metrics] + self.val_metrics
else:
self.metrics = [m for m in self.metrics if m not in self.test_metrics] + self.test_metrics
with torch.no_grad():
self.callbacks.trigger(f'before_{self.stage}_epoch', trainer=self, task=self)
epoch_patch_dicts = []
for batch_idx, batch_data in enumerate(self.dataloader):
batch_x, batch_y = self.prepare_data(batch_data)
self.callbacks.trigger(f'before_{self.stage}_batch', trainer=self.trainer, batch_x=batch_x, batch_y=batch_y, batch_idx=batch_idx)
# 获取mini-batch的`*step`方法
# 1. 最优先使用`EpochTask.step`、`Trainer.step`
step_method = getattr(self, 'step', None)
# 2. 次优先使用`EpochTask.train_step`、`Epoch.val_step`、`EpochTask.test_step`
# 3. 其次使用`Trainer.train_step`、`Trainer.val_step`、`Trainer.test_step`
step_method = getattr(self, f'{self.stage}_step') if step_method is None else step_method
# 4. 再次使用`EpochTask.evaluate_step`方法
# 5. 最次使用`Trainer.evaluate_step`
step_method = getattr(self, f'{phase}_step') if step_method is None else step_method
# 运行mini-batch的`*step`方法
if self.stage == 'train':
with torch.enable_grad():
step_out = step_method(batch_x, batch_y, **self.step_args)
else:
step_out = step_method(batch_x, batch_y, **self.step_args)
if step_out is not None:
if not isinstance(step_out, dict):
raise LoopException(f'{step_method} 方法的返回值必须为字典!')
if not all(isinstance(v, PatchBase) for k, v in step_out.items()):
raise LoopException(f'{step_method} 方法返回字典的value必须为Patch(deepepochs.PatchBase子类对象)!')
patch_dict = step_out
else:
patch_dict = {}
self.batch_patch_dict.update(patch_dict)
epoch_patch_dicts.append(self.batch_patch_dict)
# 计算当前batch的指标
batch_metric_values = flatten_dict(run_patch_dict(self.batch_patch_dict), sep='')
self.callbacks.trigger(f'after_{self.stage}_batch', trainer=self.trainer, metrics=batch_metric_values, batch_idx=batch_idx)
# 清空 self.batch_patch_dict
self.batch_patch_dict = {}
# 计算当前epoch的指标
epoch_metrics_values = flatten_dict(run_patch_dicts(epoch_patch_dicts), sep='')
self.callbacks.trigger(f'after_{self.stage}_epoch', trainer=self.trainer, task=self, metrics=epoch_metrics_values)
return epoch_metrics_values
class ModelWrapper:
"""
用于实现回调:
on_before_train_forward on_after_train_forward
on_before_val_forward on_after_val_forward
on_before_test_forward on_after_test_forward
"""
def __init__(self, model, trainer):
# self.model = torch.compile(model)
self.model = model
self.trainer = trainer
self.stage = None
def __getattr__(self, name):
return getattr(self.model, name)
def __call__(self, *args, **kwds):
self.trainer.callbacks.trigger(f'before_{self.stage}_forward', trainer=self)
model_out = self.model(*args, **kwds)
self.trainer.callbacks.trigger(f'after_{self.stage}_forward', trainer=self, model_out=model_out)
return model_out
def train(self):
self.model.train()
def eval(self):
self.model.eval()
def to(self, device):
self.model = self.model.to(device)
return self
def cpu(self):
self.model = self.model.cpu()
return self
def cuda(self):
self.model = self.model.cuda()
return self
def parameters(self):
return self.model.parameters()
def modules(self):
return self.model.modules()
def state_dict(self):
return self.model.state_dict()
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict)
class LossWrapper:
"""
1. 自动完成zero_grad、backward、opt.step等操作
2. 配合实现梯度累积
3. 实现回调
on_before_backward on_after_backward
on_before_optimize on_after_optimize
on_train_metrics on_val_metrics on_test_metrics
"""
def __init__(self, loss_fn, trainer):
self.loss_fn = loss_fn
self.trainer = trainer
self.stage = None
self.do_loss = None
self.task = None
self.total_loss = 0 # 用于实现累积梯度
self.model_outs = [] # 用于实现累积梯度
self.batch_ys = [] # 用于实现累积梯度
def optimize(self):
self.trainer.callbacks.trigger('before_optimize', trainer=self)
self.trainer.opt.step()
self.trainer.opt.zero_grad()
self.trainer.callbacks.trigger('after_optimize', trainer=self)
def __call__(self, model_out, batch_y, grad_accumulate=False):
"""
Args:
model_out: 模型预测输出
batch_y: 标签
grad_accumulate: 是否累积梯度
"""
if self.stage == 'train':
# 计算损失
loss = self.loss_fn(model_out, batch_y)
# backward
self.trainer.callbacks.trigger('before_backward', trainer=self, loss=loss)
if self.trainer.accelerator is None:
(loss/self.trainer.grad_accumulate_steps).backward()
else: # accelerate的backward
self.trainer.accelerator.backward(loss/self.trainer.grad_accumulate_steps)
self.trainer.callbacks.trigger('after_backward', trainer=self, loss=loss)
# 记录各sub-batch的总损失、模型输出、标签
_loss = loss.detach().clone()
self.total_loss += _loss * batch_size(model_out)
| self.model_outs.append(detach_clone(model_out))
| 10 | 2023-10-19 05:41:48+00:00 | 12k |
vorausrobotik/voraus-ad-dataset | train.py | [
{
"identifier": "Configuration",
"path": "configuration.py",
"snippet": "class Configuration(BaseModel):\n \"\"\"Describes the configuration parameters.\"\"\"\n\n seed: int\n epochs: int\n batchsize: int\n n_hidden_layers: int = Field(alias=\"nHiddenLayers\")\n n_coupling_blocks: int =... | import random
import numpy
import pandas
import torch
import torch.backends.cudnn
from pathlib import Path
from typing import Dict, List, Optional
from sklearn import metrics
from torch import optim
from configuration import Configuration
from normalizing_flow import NormalizingFlow, get_loss, get_loss_per_sample
from voraus_ad import ANOMALY_CATEGORIES, Signals, load_torch_dataloaders | 7,372 | """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
configuration = Configuration(
columns="machine",
epochs=70,
frequencyDivider=1,
trainGain=1.0,
seed=177,
batchsize=32,
nCouplingBlocks=4,
clamp=1.2,
learningRate=8e-4,
normalize=True,
pad=True,
nHiddenLayers=0,
scale=2,
kernelSize1=13,
dilation1=2,
kernelSize2=1,
dilation2=1,
kernelSize3=1,
dilation3=1,
milestones=[11, 61],
gamma=0.1,
)
# Make the training reproducible.
torch.manual_seed(configuration.seed)
torch.cuda.manual_seed_all(configuration.seed)
numpy.random.seed(configuration.seed)
random.seed(configuration.seed)
if DETERMINISTIC_CUDA:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Disable pylint too-many-variables here for readability.
# The whole training should run in a single function call.
def train() -> List[Dict]: # pylint: disable=too-many-locals
"""Trains the model with the paper-given parameters.
Returns:
The auroc (mean over categories) and loss per epoch.
"""
# Load the dataset as torch data loaders.
| """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
configuration = Configuration(
columns="machine",
epochs=70,
frequencyDivider=1,
trainGain=1.0,
seed=177,
batchsize=32,
nCouplingBlocks=4,
clamp=1.2,
learningRate=8e-4,
normalize=True,
pad=True,
nHiddenLayers=0,
scale=2,
kernelSize1=13,
dilation1=2,
kernelSize2=1,
dilation2=1,
kernelSize3=1,
dilation3=1,
milestones=[11, 61],
gamma=0.1,
)
# Make the training reproducible.
torch.manual_seed(configuration.seed)
torch.cuda.manual_seed_all(configuration.seed)
numpy.random.seed(configuration.seed)
random.seed(configuration.seed)
if DETERMINISTIC_CUDA:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Disable pylint too-many-variables here for readability.
# The whole training should run in a single function call.
def train() -> List[Dict]: # pylint: disable=too-many-locals
"""Trains the model with the paper-given parameters.
Returns:
The auroc (mean over categories) and loss per epoch.
"""
# Load the dataset as torch data loaders. | train_dataset, _, train_dl, test_dl = load_torch_dataloaders( | 6 | 2023-10-18 15:09:24+00:00 | 12k |
invictus717/UniDG | domainbed/scripts/visualize_adaption.py | [
{
"identifier": "datasets",
"path": "domainbed/datasets.py",
"snippet": "DATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"D... | import argparse
import collections
import json
import os
import random
import sys
import time
import uuid
import itertools
import copy
import numpy as np
import PIL
import torch
import torchvision
import torch.utils.data
import itertools
import matplotlib.pyplot as plt
import numpy as np
from argparse import Namespace
from itertools import chain
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed.lib.misc import accuracy_ent
from domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader, DataParallelPassthrough
from domainbed import model_selection
from domainbed.lib.query import Q
from domainbed import adapt_algorithms
from MulticoreTSNE import MulticoreTSNE as TSNE | 7,569 | epochs_path = os.path.join(args_in.input_dir, 'results.jsonl')
records = []
with open(epochs_path, 'r') as f:
for line in f:
records.append(json.loads(line[:-1]))
records = Q(records)
r = records[0]
args = Namespace(**r['args'])
print(args)
args.input_dir = args_in.input_dir
if '-' in args_in.adapt_algorithm:
args.adapt_algorithm, test_batch_size = args_in.adapt_algorithm.split('-')
args.test_batch_size = int(test_batch_size)
else:
args.adapt_algorithm = args_in.adapt_algorithm
args.test_batch_size = 128 # default
args.test_batch_size = 128 # default
args.output_dir = args.input_dir
alg_name = args_in.adapt_algorithm
if args.adapt_algorithm in['T3A', 'TentPreBN', 'TentClf', 'PLClf']:
use_featurer_cache = True
else:
use_featurer_cache = False
if os.path.exists(os.path.join(args.output_dir, 'done_{}'.format(alg_name))):
print("{} has already excecuted".format(alg_name))
# If we ever want to implement checkpointing, just persist these values
# every once in a while, and then load them from disk here.
algorithm_dict = None
# os.makedirs(args.output_dir, exist_ok=True)
sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out_{}.txt'.format(alg_name)))
sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err_{}.txt'.format(alg_name)))
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
assert os.path.exists(os.path.join(args.output_dir, 'done'))
assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
# 'out-split' is used by the Oracle model selectino method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
# be discared at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
# Use out splits as training data (to fair comparison with train.py)
| # The code is modified from domainbed.scripts.train
def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
"""Entropy of softmax distribution from logits."""
return -(x.softmax(1) * x.log_softmax(1)).sum(1)
class Dataset:
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def generate_featurelized_loader(loader, network, classifier, batch_size=128):
"""
The classifier adaptation does not need to repeat the heavy forward path,
We speeded up the experiments by converting the observations into representations.
"""
z_list = []
y_list = []
p_list = []
network.eval()
classifier.eval()
for x, y in loader:
x = x.to(device)
z = network(x)
p = classifier(z)
z_list.append(z.detach().cpu())
y_list.append(y.detach().cpu())
p_list.append(p.detach().cpu())
# p_list.append(p.argmax(1).float().cpu().detach())
network.train()
classifier.train()
z = torch.cat(z_list)
y = torch.cat(y_list)
p = torch.cat(p_list)
ent = softmax_entropy(p)
py = p.argmax(1).float().cpu().detach()
dataset1, dataset2 = Dataset(z, y), Dataset(z, py)
loader1 = torch.utils.data.DataLoader(dataset1, batch_size=batch_size, shuffle=False, drop_last=True)
loader2 = torch.utils.data.DataLoader(dataset2, batch_size=batch_size, shuffle=False, drop_last=True)
return loader1, loader2, ent
def visualize_tsne(network, loader, weights, device, adapt,env, name):
print("Start visualizing {}...".format(name))
if adapt:
flag = 'Adapted'
else:
flag = 'Base'
network.eval()
for x, y in loader:
x = x.to(device)
y = y.to(device)
if adapt is False:
p = network(x)
else:
p = network(x, adapt)
x = p.detach().cpu().numpy()
tsne = TSNE(n_components=2).fit_transform(x)
label = np.squeeze(y.cpu().numpy())
plt.figure(figsize=(6, 6))
size=100
line=0.7
t=.8
# plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('hsv'),marker = 'o',linewidths=line,alpha=t,edgecolors='black')
plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('terrain'),marker = 'o',linewidths=line,alpha=t,edgecolors='black')
plt.axis('off')
plt.colorbar()
plt.savefig('./visualization/vis_test_{}_{}_{}.jpg'.format(env,flag,name))
print("Visualization Results Saved...")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Domain generalization')
parser.add_argument('--input_dir', type=str)
parser.add_argument('--adapt_algorithm', type=str, default="UniDG")
args_in = parser.parse_args()
epochs_path = os.path.join(args_in.input_dir, 'results.jsonl')
records = []
with open(epochs_path, 'r') as f:
for line in f:
records.append(json.loads(line[:-1]))
records = Q(records)
r = records[0]
args = Namespace(**r['args'])
print(args)
args.input_dir = args_in.input_dir
if '-' in args_in.adapt_algorithm:
args.adapt_algorithm, test_batch_size = args_in.adapt_algorithm.split('-')
args.test_batch_size = int(test_batch_size)
else:
args.adapt_algorithm = args_in.adapt_algorithm
args.test_batch_size = 128 # default
args.test_batch_size = 128 # default
args.output_dir = args.input_dir
alg_name = args_in.adapt_algorithm
if args.adapt_algorithm in['T3A', 'TentPreBN', 'TentClf', 'PLClf']:
use_featurer_cache = True
else:
use_featurer_cache = False
if os.path.exists(os.path.join(args.output_dir, 'done_{}'.format(alg_name))):
print("{} has already excecuted".format(alg_name))
# If we ever want to implement checkpointing, just persist these values
# every once in a while, and then load them from disk here.
algorithm_dict = None
# os.makedirs(args.output_dir, exist_ok=True)
sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out_{}.txt'.format(alg_name)))
sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err_{}.txt'.format(alg_name)))
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
assert os.path.exists(os.path.join(args.output_dir, 'done'))
assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
# 'out-split' is used by the Oracle model selectino method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
# be discared at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
# Use out splits as training data (to fair comparison with train.py) | train_loaders = [FastDataLoader( | 6 | 2023-10-15 14:26:12+00:00 | 12k |
jianlanluo/SAQ | vqn/vqn_main.py | [
{
"identifier": "VQN",
"path": "vqn/vqn.py",
"snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantiza... | import os
import time
import uuid
import numpy as np
import jax
import jax.numpy as jnp
import flax
import gym
import d4rl
import absl.app
import absl.flags
from copy import deepcopy
from pprint import pprint
from robomimic.utils.dataset import SequenceDataset
from .vqn import VQN
from .replay_buffer import get_d4rl_dataset, subsample_batch
from .jax_utils import batch_to_jax
from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy
from .sampler import StepSampler, TrajSampler
from .robomimic_utils import (
make_dataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env,
ENV_TO_HORIZON_MAP, OBS_KEYS
)
from .utils import (
Timer, define_flags_with_default, set_random_seed, print_flags,
get_user_flags, prefix_metrics, WandBLogger
) | 7,219 |
FLAGS_DEF = define_flags_with_default(
env='halfcheetah-medium-v2',
max_traj_length=200,
algorithm='vqn',
seed=42,
save_model=False,
batch_size=256,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
vqvae_n_epochs=500,
dqn_n_epochs=1000,
bc_epochs=1001,
n_train_step_per_epoch=10,
eval_period=10,
eval_n_trajs=5,
vqn=VQN.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
|
FLAGS_DEF = define_flags_with_default(
env='halfcheetah-medium-v2',
max_traj_length=200,
algorithm='vqn',
seed=42,
save_model=False,
batch_size=256,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
vqvae_n_epochs=500,
dqn_n_epochs=1000,
bc_epochs=1001,
n_train_step_per_epoch=10,
eval_period=10,
eval_n_trajs=5,
vqn=VQN.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) | set_random_seed(FLAGS.seed) | 17 | 2023-10-18 06:31:20+00:00 | 12k |
naver-ai/dual-teacher | tools/train.py | [
{
"identifier": "__version__",
"path": "mmseg/version.py",
"snippet": "def parse_version_info(version_str):"
},
{
"identifier": "set_random_seed",
"path": "mmseg/apis/train.py",
"snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n Args:\n se... | import argparse
import copy
import os
import os.path as osp
import time
import logging
import mmcv
import torch
import numpy as np
import seg_core.eval_seg as eval_seg
import torch.nn.functional as F
import warnings
import torch.distributed as dist
import random
import tempfile
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from torchvision.transforms import ToTensor
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset, build_dataloader
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from seg_core.model import MiT_SegFormer
from seg_core.optimizer import PolyWarmupAdamW
from seg_core.augmentations import ClassMixLoss, compute_classmix, compute_cutmix, compute_ic
from torchvision.utils import save_image
from dist_helper import setup_distributed
from mmseg.apis import single_gpu_test
from mmcv.image import tensor2imgs
from PIL import Image, ImageOps, ImageFilter
from torchvision import transforms
from copy import deepcopy | 7,720 | meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = MiT_SegFormer(backbone=args.backbone,
num_classes=150,
embedding_dim=256,
pretrained=True)
if args.ddp: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
model_teacher = MiT_SegFormer(backbone=args.backbone + '_ema',
num_classes=150,
embedding_dim=256,
pretrained=True).cuda()
for p in model_teacher.parameters():
p.requires_grad = False
model_teacher2 = MiT_SegFormer(backbone=args.backbone + '_ema',
num_classes=150,
embedding_dim=256,
pretrained=True).cuda()
for p in model_teacher2.parameters():
p.requires_grad = False
param_groups = model.get_param_groups()
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
if rank == 0:
print('trainable_params:', trainable_params)
shuffle = True
if args.ddp:
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
output_device=local_rank,
find_unused_parameters=True,
)
shuffle = False
max_iters = 50000
print_iters = 100
eval_iters = 5000
optimizer = PolyWarmupAdamW(
params=[
{
"params": param_groups[0],
"lr": cfg.optimizer.lr,
"weight_decay": cfg.optimizer.weight_decay,
},
{
"params": param_groups[1],
"lr": cfg.optimizer.lr,
"weight_decay": 0.0,
},
{
"params": param_groups[2],
"lr": cfg.optimizer.lr * 10,
"weight_decay": cfg.optimizer.weight_decay,
},
],
lr=cfg.optimizer.lr,
weight_decay=cfg.optimizer.weight_decay,
betas=cfg.optimizer.betas,
warmup_iter=cfg.scheduler.warmup_iters,
max_iter=max_iters,
warmup_ratio=cfg.scheduler.warmup_ratio,
power=cfg.scheduler.power
)
supervised_full = False
if supervised_full:
datasets = [build_dataset(cfg.data.train)]
else:
datasets = [build_dataset(cfg.data.train_semi_l)]
datasets_u = [build_dataset(cfg.data.train_semi_u)]
datasets_val = [build_dataset(cfg.data.val)]
batch_size = 4
train_loader = [
build_dataloader(
ds,
samples_per_gpu=batch_size,
workers_per_gpu=0,
num_gpus=1,
dist=distributed,
shuffle=shuffle,
seed=cfg.seed,
drop_last=True,
pin_memory=True) for ds in datasets
]
train_loader_u = [
build_dataloader(
ds,
samples_per_gpu=batch_size,
workers_per_gpu=0,
num_gpus=1,
dist=distributed,
shuffle=shuffle,
seed=cfg.seed,
drop_last=True,
pin_memory=True) for ds in datasets_u
]
val_loader = [
build_dataloader(
ds,
samples_per_gpu=1,
workers_per_gpu=0,
num_gpus=1,
dist=distributed,
shuffle=False,
seed=cfg.seed,
drop_last=False,
pin_memory=True) for ds in datasets_val
]
criterion = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()
| """
Dual-Teacher
Copyright (c) 2023-present NAVER Cloud Corp.
distributed under NVIDIA Source Code License for SegFormer
--------------------------------------------------------
References:
SegFormer: https://github.com/NVlabs/SegFormer
--------------------------------------------------------
"""
warnings.filterwarnings("ignore")
criterion_u = torch.nn.CrossEntropyLoss(reduction='none').cuda()
def train_sup(args, model, optimizer, train_loader, val_loader, criterion, max_iters, print_iters, eval_iters):
train_iterator = iter(train_loader)
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for epoch in range(200):
for i in range(len(train_loader)):
model.train()
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
outputs = model(image)
outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
seg_loss = criterion(outputs, label.type(torch.long))
optimizer.zero_grad()
seg_loss.backward()
optimizer.step()
if rank == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("save_path:{}".format(args.save_path))
logging.info("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
print("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
logging.info('[iter:{}] Validation:'.format(i + 1))
print('[iter:{}] Validation:'.format(i + 1))
val_score = val(model.module, val_loader)
logging.info('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
print('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
model.train()
def train_dual(args, model, model_teacher, model_teacher2, optimizer, train_loader, train_loader_u, val_loader, criterion, cm_loss_fn, max_iters, print_iters, eval_iters):
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
best_miou, best_epoch = 0, 0
for epoch in range(200):
model.train()
train_loader.sampler.set_epoch(epoch)
train_loader_u.sampler.set_epoch(epoch)
train_iterator = iter(train_loader)
train_iterator_u = iter(train_loader_u)
if epoch % 2 == 0:
ema_model = model_teacher
do_cut_mix = True
do_class_mix = False
else:
ema_model = model_teacher2
do_cut_mix = False
do_class_mix = True
ema_model.train()
for i in range(len(train_loader)):
try:
batch_data_u = next(train_iterator_u)
except:
train_iterator_u = iter(train_loader_u)
batch_data_u = next(train_iterator_u)
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
image_u = batch_data_u['img'].data[0].cuda(non_blocking=True)
label_u = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
b, _, h, w = image.shape
image_u_strong = deepcopy(image_u)
image_u_strong = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(image_u_strong)
image_u_strong = transforms.RandomGrayscale(p=0.2)(image_u_strong)
if do_class_mix:
loss = compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, image, label, image_u, image_u_strong, threshold=0.95)
if do_cut_mix:
loss = compute_cutmix(h, w, image, label, criterion, model, ema_model, image_u, threshold=0.95)
loss_dc = compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold=0.95)
total_loss = loss + loss_dc * 0.2
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if args.ddp:
reduced_loss = loss.clone().detach()
dist.all_reduce(reduced_loss)
update_ema(model_teacher=ema_model, model=model, alpha_teacher=0.99, iteration=i)
if rank == 0:
if (i + 1) % print_iters == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item()))
print("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item()))
if rank == 0:
logging.info('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1))
print('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1))
val_score = val(model.module, val_loader)
miou = val_score['Mean IoU'] * 100
if miou > best_miou:
best_miou = miou
best_epoch = epoch
logging.info('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch))
print('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch))
model.train()
def synchronize():
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def val(model, data_loader):
model.eval()
preds, gts = [], []
for i, data in enumerate(data_loader):
with torch.no_grad():
image = data['img'][0].cuda(non_blocking=True)
label = data['gt_semantic_seg'][0].cuda(non_blocking=True)
outputs = model(image)
resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16))
gts += list(label.cpu().numpy().astype(np.int16))
score = eval_seg.scores(gts, preds, num_classes=150)
model.train()
return score
def val_ddp(args, epoch, model, data_loader):
model.eval()
preds, gts = [], []
if args.ddp:
data_loader.sampler.set_epoch(epoch)
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for i, data in enumerate(data_loader):
with torch.no_grad():
# print(data)
image = data['img'][0].cuda(non_blocking=True)
label = data['gt_semantic_seg'][0].cuda(non_blocking=True)
outputs = model(image)
resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16))
gts += list(label.cpu().numpy().astype(np.int16))
if args.ddp:
preds = torch.from_numpy(np.array(preds)).cuda()
gts = torch.from_numpy(np.array(gts)).cuda()
dist.all_reduce(preds)
dist.all_reduce(gts)
gts = list(gts)
preds = list(preds)
score = eval_seg.scores(gts, preds, num_classes=150)
return score
def intersectionAndUnion(output, target, K, ignore_index):
# 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
assert output.ndim in [1, 2, 3]
assert output.shape == target.shape
output = output.reshape(output.size).copy()
target = target.reshape(target.size)
output[np.where(target == ignore_index)[0]] = ignore_index
intersection = output[np.where(output == target)[0]]
area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
area_output, _ = np.histogram(output, bins=np.arange(K + 1))
area_target, _ = np.histogram(target, bins=np.arange(K + 1))
area_union = area_output + area_target - area_intersection
return area_intersection, area_union, area_target
def update_ema(model_teacher, model, alpha_teacher, iteration):
with torch.no_grad():
alpha_teacher = min(1 - 1 / (iteration + 1), alpha_teacher)
for ema_param, param in zip(model_teacher.parameters(), model.parameters()):
ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]
def setup_logger(filename='test.log'):
## setup logger
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fHandler = logging.FileHandler(filename, mode='w')
fHandler.setFormatter(logFormatter)
logger.addHandler(fHandler)
cHandler = logging.StreamHandler()
cHandler.setFormatter(logFormatter)
logger.addHandler(cHandler)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('--ddp', default=False, action='store_true')
parser.add_argument('--dual_teacher', default=False, action='store_true')
parser.add_argument('--unimatch_aug', default=False, action='store_true')
parser.add_argument('--save_path', type=str, help='log moemo')
parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format')
parser.add_argument('--config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument("--backbone", type=str)
parser.add_argument("--port", default=None, type=int)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dc', default=False, action='store_true')
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
def image_saver(input, name):
"""
:param name: "path/name"
"""
if input.dim() == 3:
input = input.unsqueeze(dim=0)
save_image(input.float(), str(name) + '.jpg')
def main():
setup_logger()
args = parse_args()
mit_type = args.backbone[-1]
if mit_type == '5':
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.640x640.ade.160k.py'
else:
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.512x512.ade.160k.py'
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
torch.backends.cudnn.benchmark = False
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
distributed = False
if args.ddp:
rank, word_size = setup_distributed(port=args.port)
distributed = True
else:
rank = 0
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
print('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
meta['env_info'] = env_info
# log some basic info
print(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
print(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = MiT_SegFormer(backbone=args.backbone,
num_classes=150,
embedding_dim=256,
pretrained=True)
if args.ddp: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
model_teacher = MiT_SegFormer(backbone=args.backbone + '_ema',
num_classes=150,
embedding_dim=256,
pretrained=True).cuda()
for p in model_teacher.parameters():
p.requires_grad = False
model_teacher2 = MiT_SegFormer(backbone=args.backbone + '_ema',
num_classes=150,
embedding_dim=256,
pretrained=True).cuda()
for p in model_teacher2.parameters():
p.requires_grad = False
param_groups = model.get_param_groups()
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
if rank == 0:
print('trainable_params:', trainable_params)
shuffle = True
if args.ddp:
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
output_device=local_rank,
find_unused_parameters=True,
)
shuffle = False
max_iters = 50000
print_iters = 100
eval_iters = 5000
optimizer = PolyWarmupAdamW(
params=[
{
"params": param_groups[0],
"lr": cfg.optimizer.lr,
"weight_decay": cfg.optimizer.weight_decay,
},
{
"params": param_groups[1],
"lr": cfg.optimizer.lr,
"weight_decay": 0.0,
},
{
"params": param_groups[2],
"lr": cfg.optimizer.lr * 10,
"weight_decay": cfg.optimizer.weight_decay,
},
],
lr=cfg.optimizer.lr,
weight_decay=cfg.optimizer.weight_decay,
betas=cfg.optimizer.betas,
warmup_iter=cfg.scheduler.warmup_iters,
max_iter=max_iters,
warmup_ratio=cfg.scheduler.warmup_ratio,
power=cfg.scheduler.power
)
supervised_full = False
if supervised_full:
datasets = [build_dataset(cfg.data.train)]
else:
datasets = [build_dataset(cfg.data.train_semi_l)]
datasets_u = [build_dataset(cfg.data.train_semi_u)]
datasets_val = [build_dataset(cfg.data.val)]
batch_size = 4
train_loader = [
build_dataloader(
ds,
samples_per_gpu=batch_size,
workers_per_gpu=0,
num_gpus=1,
dist=distributed,
shuffle=shuffle,
seed=cfg.seed,
drop_last=True,
pin_memory=True) for ds in datasets
]
train_loader_u = [
build_dataloader(
ds,
samples_per_gpu=batch_size,
workers_per_gpu=0,
num_gpus=1,
dist=distributed,
shuffle=shuffle,
seed=cfg.seed,
drop_last=True,
pin_memory=True) for ds in datasets_u
]
val_loader = [
build_dataloader(
ds,
samples_per_gpu=1,
workers_per_gpu=0,
num_gpus=1,
dist=distributed,
shuffle=False,
seed=cfg.seed,
drop_last=False,
pin_memory=True) for ds in datasets_val
]
criterion = torch.nn.CrossEntropyLoss(ignore_index=255).cuda() | cm_loss_fn = ClassMixLoss(weight=None, reduction='none', ignore_index=255) | 10 | 2023-10-19 04:04:31+00:00 | 12k |
SLDGroup/G-CASCADE | lib/maxxvit_4out.py | [
{
"identifier": "build_model_with_cfg",
"path": "lib/models_timm/helpers.py",
"snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_c... | import math
import torch
from collections import OrderedDict
from dataclasses import dataclass, replace
from functools import partial
from typing import Callable, Optional, Union, Tuple, List
from torch import nn
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from lib.models_timm.helpers import build_model_with_cfg, checkpoint_seq, named_apply
from lib.models_timm.fx_features import register_notrace_function
from lib.models_timm.layers import Mlp, ConvMlp, DropPath, ClassifierHead, trunc_normal_tf_, LayerNorm2d, LayerNorm
from lib.models_timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d
from lib.models_timm.layers import to_2tuple, extend_tuple, make_divisible, _assert
from lib.models_timm.registry import register_model
from lib.models_timm.vision_transformer_relpos import RelPosMlp, RelPosBias # FIXME move these to common location | 9,584 |
def init_weights(self, scheme=''):
    """Initialize attention and conv sub-modules with the given init scheme."""
    for attn_mod in (self.attn_block, self.attn_grid):
        named_apply(partial(_init_transformer, scheme=scheme), attn_mod)
    named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
    """Conv stage in NCHW, then block (window) and grid attention.

    Attention runs in channels-last unless the module was configured for
    NCHW attention (`self.nchw_attn`).
    """
    x = self.conv(x)
    needs_permute = not self.nchw_attn
    if needs_permute:
        x = x.permute(0, 2, 3, 1)  # NCHW -> NHWC
    x = self.attn_grid(self.attn_block(x))
    if needs_permute:
        x = x.permute(0, 3, 1, 2)  # NHWC -> NCHW
    return x
class ParallelMaxxVitBlock(nn.Module):
    """ MaxVit block with parallel cat(window + grid), one FF
    Experimental timm block.

    Args:
        dim: input channels.
        dim_out: output channels (applied by the first conv).
        stride: spatial stride of the first conv (1 or 2).
        num_conv: number of conv blocks preceding the attention block.
        conv_cfg / transformer_cfg: sub-block hyper-parameters.
        drop_path: stochastic depth rate for all sub-blocks.
    """

    def __init__(
            self,
            dim,
            dim_out,
            stride=1,
            num_conv=2,
            conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path=0.,
    ):
        super().__init__()
        conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
        # FIX: the original used `[conv_cls(...)] * (num_conv - 1)`, which
        # repeats ONE module instance, silently sharing weights between all
        # extra conv positions when num_conv > 2. Build independent blocks.
        convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
        convs += [
            conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)
            for _ in range(num_conv - 1)
        ]
        self.conv = nn.Sequential(*convs) if num_conv > 1 else convs[0]
        self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)

    def init_weights(self, scheme=''):
        named_apply(partial(_init_transformer, scheme=scheme), self.attn)
        named_apply(partial(_init_conv, scheme=scheme), self.conv)

    def forward(self, x):
        # conv in NCHW, attention in channels-last, back to NCHW
        x = self.conv(x)
        x = x.permute(0, 2, 3, 1)
        x = self.attn(x)
        x = x.permute(0, 3, 1, 2)
        return x
class MaxxVitStage(nn.Module):
def __init__(
        self,
        in_chs: int,
        out_chs: int,
        stride: int = 2,
        depth: int = 4,
        feat_size: Tuple[int, int] = (14, 14),
        block_types: Union[str, Tuple[str]] = 'C',
        transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
        conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
        drop_path: Union[float, List[float]] = 0.,
):
    """Build one stage as a sequence of conv / transformer / maxvit blocks.

    Args:
        in_chs: input channels to the stage.
        out_chs: output channels (applied by the first block).
        stride: spatial stride of the first block; remaining blocks use 1.
        depth: number of blocks; `block_types` is extended to this length.
        feat_size: feature map size used to build relative-position modules.
        block_types: per-block kind, each one of 'C' (conv), 'T' (transformer),
            'M' (maxvit), 'PM' (parallel maxvit).
        transformer_cfg / conv_cfg: sub-block hyper-parameters.
        drop_path: per-block stochastic depth rates; a scalar is broadcast.
    """
    super().__init__()
    self.grad_checkpointing = False

    block_types = extend_tuple(block_types, depth)
    # FIX: the declared type (and the default 0.) allows a scalar drop_path,
    # but the loop below indexes drop_path[i] and crashed on scalars.
    if not isinstance(drop_path, (list, tuple)):
        drop_path = [drop_path] * len(block_types)
    blocks = []
    for i, t in enumerate(block_types):
        block_stride = stride if i == 0 else 1  # only the first block strides
        assert t in ('C', 'T', 'M', 'PM')
        if t == 'C':
            conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
            blocks += [conv_cls(
                in_chs,
                out_chs,
                stride=block_stride,
                cfg=conv_cfg,
                drop_path=drop_path[i],
            )]
        elif t == 'T':
            rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size)
            blocks += [TransformerBlock2d(
                in_chs,
                out_chs,
                stride=block_stride,
                rel_pos_cls=rel_pos_cls,
                cfg=transformer_cfg,
                drop_path=drop_path[i],
            )]
        elif t == 'M':
            blocks += [MaxxVitBlock(
                in_chs,
                out_chs,
                stride=block_stride,
                conv_cfg=conv_cfg,
                transformer_cfg=transformer_cfg,
                drop_path=drop_path[i],
            )]
        elif t == 'PM':
            blocks += [ParallelMaxxVitBlock(
                in_chs,
                out_chs,
                stride=block_stride,
                conv_cfg=conv_cfg,
                transformer_cfg=transformer_cfg,
                drop_path=drop_path[i],
            )]
        in_chs = out_chs
    self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
| """ MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch
This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch.
99% of the implementation was done from papers, however last minute some adjustments were made
based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit
There are multiple sets of models defined for both architectures. Typically, names with a
`_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit.
These configs work well and appear to be a bit faster / lower resource than the paper.
The models without extra prefix / suffix' (coatnet_0_224, maxvit_tiny_224, etc), are intended to
match paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match.
# FIXME / WARNING
This impl remains a WIP, some configs and models may vanish or change...
Papers:
MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697
@article{tu2022maxvit,
title={MaxViT: Multi-Axis Vision Transformer},
author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao},
journal={ECCV},
year={2022},
}
CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803
@article{DBLP:journals/corr/abs-2106-04803,
author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan},
title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes},
journal = {CoRR},
volume = {abs/2106.04803},
year = {2021}
}
Hacked together by / Copyright 2022, Ross Wightman
"""
# Public API of this module (timm convention).
__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
# Registry of per-variant pretrained checkpoint metadata (url, input size,
# crop, normalization) consumed by timm's pretrained-weight loading helpers.
default_cfgs = {
    # Fiddling with configs / defaults / still pretraining
    'coatnet_pico_rw_224': _cfg(url=''),
    'coatnet_nano_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth',
        crop_pct=0.9),
    'coatnet_0_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'),
    'coatnet_1_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth'
    ),
    'coatnet_2_rw_224': _cfg(url=''),
    'coatnet_3_rw_224': _cfg(url=''),
    # Highly experimental configs
    'coatnet_bn_0_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
        crop_pct=0.95),
    'coatnet_rmlp_nano_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth',
        crop_pct=0.9),
    'coatnet_rmlp_0_rw_224': _cfg(url=''),
    'coatnet_rmlp_1_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'),
    'coatnet_rmlp_2_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'),
    'coatnet_rmlp_3_rw_224': _cfg(url=''),
    'coatnet_nano_cc_224': _cfg(url=''),
    'coatnext_nano_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth',
        crop_pct=0.9),
    # Trying to be like the CoAtNet paper configs
    'coatnet_0_224': _cfg(url=''),
    'coatnet_1_224': _cfg(url=''),
    'coatnet_2_224': _cfg(url=''),
    'coatnet_3_224': _cfg(url=''),
    'coatnet_4_224': _cfg(url=''),
    'coatnet_5_224': _cfg(url=''),
    # Experimental configs
    'maxvit_pico_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_nano_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_tiny_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'),
    'maxvit_tiny_rw_256': _cfg(
        url='',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_pico_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_nano_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_tiny_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_small_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth',
        crop_pct=0.9,
    ),
    'maxvit_rmlp_small_rw_256': _cfg(
        url='',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_tiny_pm_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxxvit_rmlp_nano_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxxvit_rmlp_tiny_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxxvit_rmlp_small_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    # Trying to be like the MaxViT paper configs
    'maxvit_tiny_224': _cfg(url=''),
    'maxvit_small_224': _cfg(url=''),
    'maxvit_base_224': _cfg(url=''),
    'maxvit_large_224': _cfg(url=''),
    'maxvit_xlarge_224': _cfg(url=''),
}
@dataclass
class MaxxVitTransformerCfg:
    """Hyper-parameters for the attention / transformer parts of a MaxxVit."""
    dim_head: int = 32          # channels per attention head
    expand_ratio: float = 4.0   # MLP hidden expansion ratio
    expand_first: bool = True   # expand width in attn input proj (vs output proj)
    shortcut_bias: bool = True
    attn_bias: bool = True
    attn_drop: float = 0.
    proj_drop: float = 0.
    pool_type: str = 'avg2'     # pooling used by strided blocks (see Downsample2d)
    rel_pos_type: str = 'bias'  # relative position type ('bias' table vs 'mlp')
    rel_pos_dim: int = 512  # for relative position types w/ MLP
    # NOTE(review): appears to derive window/grid size from the input size
    # when explicit sizes aren't given — confirm against the model builder.
    partition_ratio: int = 32
    window_size: Optional[Tuple[int, int]] = None
    grid_size: Optional[Tuple[int, int]] = None
    init_values: Optional[float] = None  # LayerScale init value (None disables)
    act_layer: str = 'gelu'
    norm_layer: str = 'layernorm2d'
    norm_layer_cl: str = 'layernorm'     # norm used on channels-last tensors
    norm_eps: float = 1e-6

    def __post_init__(self):
        # Normalize sizes to 2-tuples; grid size defaults to the window size.
        if self.grid_size is not None:
            self.grid_size = to_2tuple(self.grid_size)
        if self.window_size is not None:
            self.window_size = to_2tuple(self.window_size)
            if self.grid_size is None:
                self.grid_size = self.window_size
@dataclass
class MaxxVitConvCfg:
    """Hyper-parameters for the conv (MBConv / ConvNeXt) parts of a MaxxVit."""
    block_type: str = 'mbconv'  # 'mbconv' or 'convnext' (asserted below)
    expand_ratio: float = 4.0
    expand_output: bool = True  # calculate expansion channels from output (vs input chs)
    kernel_size: int = 3
    group_size: int = 1  # 1 == depthwise
    pre_norm_act: bool = False  # activation after pre-norm
    output_bias: bool = True  # bias for shortcut + final 1x1 projection conv
    stride_mode: str = 'dw'  # stride done via one of 'pool', '1x1', 'dw'
    pool_type: str = 'avg2'
    downsample_pool_type: str = 'avg2'
    attn_early: bool = False  # apply attn between conv2 and norm2, instead of after norm2
    attn_layer: str = 'se'
    attn_act_layer: str = 'silu'
    attn_ratio: float = 0.25
    init_values: Optional[float] = 1e-6  # for ConvNeXt block, ignored by MBConv
    act_layer: str = 'gelu'
    norm_layer: str = ''     # '' -> chosen per block type in __post_init__
    norm_layer_cl: str = ''  # '' -> chosen per block type in __post_init__
    norm_eps: Optional[float] = None  # None -> per-block-type default

    def __post_init__(self):
        # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
        assert self.block_type in ('mbconv', 'convnext')
        use_mbconv = self.block_type == 'mbconv'
        if not self.norm_layer:
            self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d'
        if not self.norm_layer_cl and not use_mbconv:
            self.norm_layer_cl = 'layernorm'
        if self.norm_eps is None:
            self.norm_eps = 1e-5 if use_mbconv else 1e-6
        self.downsample_pool_type = self.downsample_pool_type or self.pool_type
@dataclass
class MaxxVitCfg:
    """Top-level architecture config: per-stage widths, depths, block types."""
    embed_dim: Tuple[int, ...] = (96, 192, 384, 768)  # per-stage channel widths
    depths: Tuple[int, ...] = (2, 3, 5, 2)            # blocks per stage
    # Per-stage block kinds: 'C' conv, 'T' transformer, 'M' maxvit,
    # 'PM' parallel maxvit (see MaxxVitStage).
    block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
    stem_width: Union[int, Tuple[int, int]] = 64
    stem_bias: bool = True
    # NOTE(review): these default instances are shared by every MaxxVitCfg that
    # does not override them — safe only while treated as read-only; a
    # default_factory would be safer. Confirm intent before changing.
    conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg()
    transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg()
    weight_init: str = 'vit_eff'
def _rw_coat_cfg(
        stride_mode='pool',
        pool_type='avg2',
        conv_output_bias=False,
        conv_attn_early=False,
        conv_attn_act_layer='relu',
        conv_norm_layer='',
        transformer_shortcut_bias=True,
        transformer_norm_layer='layernorm2d',
        transformer_norm_layer_cl='layernorm',
        init_values=None,
        rel_pos_type='bias',
        rel_pos_dim=512,
):
    """Shared config kwargs for the timm 'rw' CoAtNet variants.

    These models were created and trained before the official
    google-research/maxvit code release and differ from the paper:
    MBConv pre-norm includes an activation; MBConv expansion is computed
    from input (not output) channels; the MBConv shortcut and final 1x1
    conv have no bias; SE uses relu and MBConv uses silu; attention
    expansion happens via the output projection. Variable (evolved)
    differences: avg-pool(k=2) downsampling, SE between conv2 and norm,
    and a bias-free transformer shortcut.
    """
    conv_cfg = MaxxVitConvCfg(
        stride_mode=stride_mode,
        pool_type=pool_type,
        pre_norm_act=True,
        expand_output=False,
        output_bias=conv_output_bias,
        attn_early=conv_attn_early,
        attn_act_layer=conv_attn_act_layer,
        act_layer='silu',
        norm_layer=conv_norm_layer,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        shortcut_bias=transformer_shortcut_bias,
        pool_type=pool_type,
        init_values=init_values,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return {'conv_cfg': conv_cfg, 'transformer_cfg': transformer_cfg}
def _rw_max_cfg(
        stride_mode='dw',
        pool_type='avg2',
        conv_output_bias=False,
        conv_attn_ratio=1 / 16,
        conv_norm_layer='',
        transformer_norm_layer='layernorm2d',
        transformer_norm_layer_cl='layernorm',
        window_size=None,
        dim_head=32,
        init_values=None,
        rel_pos_type='bias',
        rel_pos_dim=512,
):
    """Shared config kwargs for the timm 'rw' MaxVit variants.

    These predate the official google-research/maxvit release and differ
    from the paper: MBConv expansion is computed from input (not output)
    channels, the MBConv shortcut and final 1x1 conv have no bias, MBConv
    uses silu (not gelu), and attention expansion is done in the output
    projection rather than the input projection.
    """
    conv_cfg = MaxxVitConvCfg(
        stride_mode=stride_mode,
        pool_type=pool_type,
        expand_output=False,
        output_bias=conv_output_bias,
        attn_ratio=conv_attn_ratio,
        act_layer='silu',
        norm_layer=conv_norm_layer,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        pool_type=pool_type,
        dim_head=dim_head,
        window_size=window_size,
        init_values=init_values,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return {'conv_cfg': conv_cfg, 'transformer_cfg': transformer_cfg}
def _next_cfg(
        stride_mode='dw',
        pool_type='avg2',
        conv_norm_layer='layernorm2d',
        conv_norm_layer_cl='layernorm',
        transformer_norm_layer='layernorm2d',
        transformer_norm_layer_cl='layernorm',
        window_size=None,
        init_values=1e-6,
        rel_pos_type='mlp',  # MLP by default for maxxvit
        rel_pos_dim=512,
):
    """Shared config kwargs for experimental 'maxxvit' variants that use
    ConvNeXt blocks in place of MBConv.

    `init_values` may be a scalar or a (conv, transformer) pair; it is
    normalized to a 2-tuple and split between the two sub-configs.
    """
    conv_ls_init, attn_ls_init = to_2tuple(init_values)
    conv_cfg = MaxxVitConvCfg(
        block_type='convnext',
        stride_mode=stride_mode,
        pool_type=pool_type,
        expand_output=False,
        init_values=conv_ls_init,
        norm_layer=conv_norm_layer,
        norm_layer_cl=conv_norm_layer_cl,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        pool_type=pool_type,
        window_size=window_size,
        init_values=attn_ls_init,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return {'conv_cfg': conv_cfg, 'transformer_cfg': transformer_cfg}
# Architecture definitions for every registered variant; each entry is a
# MaxxVitCfg consumed by the model factory. Helper functions (_rw_coat_cfg,
# _rw_max_cfg, _next_cfg) supply the conv/transformer sub-configs.
model_cfgs = dict(
    # Fiddling with configs / defaults / still pretraining
    coatnet_pico_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 3, 5, 2),
        stem_width=(32, 64),
        **_rw_max_cfg(  # using newer max defaults here
            conv_output_bias=True,
            conv_attn_ratio=0.25,
        ),
    ),
    coatnet_nano_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        **_rw_max_cfg(  # using newer max defaults here
            stride_mode='pool',
            conv_output_bias=True,
            conv_attn_ratio=0.25,
        ),
    ),
    coatnet_0_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 7, 2),  # deeper than paper '0' model
        stem_width=(32, 64),
        **_rw_coat_cfg(
            conv_attn_early=True,
            transformer_shortcut_bias=False,
        ),
    ),
    coatnet_1_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        stem_width=(32, 64),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_early=True,
            transformer_shortcut_bias=False,
        )
    ),
    coatnet_2_rw_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        stem_width=(64, 128),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
        ),
    ),
    coatnet_3_rw_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        stem_width=(96, 192),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
        ),
    ),
    # Highly experimental configs
    coatnet_bn_0_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 7, 2),  # deeper than paper '0' model
        stem_width=(32, 64),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_early=True,
            transformer_shortcut_bias=False,
            transformer_norm_layer='batchnorm2d',
        )
    ),
    coatnet_rmlp_nano_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        **_rw_max_cfg(
            conv_output_bias=True,
            conv_attn_ratio=0.25,
            rel_pos_type='mlp',
            rel_pos_dim=384,
        ),
    ),
    coatnet_rmlp_0_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 7, 2),  # deeper than paper '0' model
        stem_width=(32, 64),
        **_rw_coat_cfg(
            stride_mode='dw',
            rel_pos_type='mlp',
        ),
    ),
    coatnet_rmlp_1_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        stem_width=(32, 64),
        **_rw_coat_cfg(
            pool_type='max',
            conv_attn_early=True,
            transformer_shortcut_bias=False,
            rel_pos_type='mlp',
            rel_pos_dim=384,  # was supposed to be 512, woops
        ),
    ),
    coatnet_rmlp_2_rw_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        stem_width=(64, 128),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
            rel_pos_type='mlp'
        ),
    ),
    coatnet_rmlp_3_rw_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        stem_width=(96, 192),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
            rel_pos_type='mlp'
        ),
    ),
    coatnet_nano_cc_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        block_type=('C', 'C', ('C', 'T'), ('C', 'T')),
        **_rw_coat_cfg(),
    ),
    coatnext_nano_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        weight_init='normal',
        **_next_cfg(
            rel_pos_type='bias',
            init_values=(1e-5, None)
        ),
    ),
    # Trying to be like the CoAtNet paper configs
    coatnet_0_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 5, 2),
        stem_width=64,
    ),
    coatnet_1_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        stem_width=64,
    ),
    coatnet_2_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        stem_width=128,
    ),
    coatnet_3_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        stem_width=192,
    ),
    coatnet_4_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 12, 28, 2),
        stem_width=192,
    ),
    coatnet_5_224=MaxxVitCfg(
        embed_dim=(256, 512, 1280, 2048),
        depths=(2, 12, 28, 2),
        stem_width=192,
    ),
    # Experimental MaxVit configs
    maxvit_pico_rw_256=MaxxVitCfg(
        embed_dim=(32, 64, 128, 256),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(24, 32),
        **_rw_max_cfg(),
    ),
    maxvit_nano_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(1, 2, 3, 1),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxvit_tiny_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxvit_tiny_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxvit_rmlp_pico_rw_256=MaxxVitCfg(
        embed_dim=(32, 64, 128, 256),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(24, 32),
        **_rw_max_cfg(rel_pos_type='mlp'),
    ),
    maxvit_rmlp_nano_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(1, 2, 3, 1),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(rel_pos_type='mlp'),
    ),
    maxvit_rmlp_tiny_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(rel_pos_type='mlp'),
    ),
    maxvit_rmlp_small_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(
            rel_pos_type='mlp',
            init_values=1e-6,
        ),
    ),
    maxvit_rmlp_small_rw_256=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(
            rel_pos_type='mlp',
            init_values=1e-6,
        ),
    ),
    maxvit_tiny_pm_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('PM',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxxvit_rmlp_nano_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(1, 2, 3, 1),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        weight_init='normal',
        **_next_cfg(),
    ),
    maxxvit_rmlp_tiny_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_next_cfg(),
    ),
    maxxvit_rmlp_small_rw_256=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(48, 96),
        **_next_cfg(),
    ),
    # Trying to be like the MaxViT paper configs
    maxvit_tiny_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=64,
    ),
    maxvit_small_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=64,
    ),
    maxvit_base_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        block_type=('M',) * 4,
        stem_width=64,
    ),
    maxvit_large_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        block_type=('M',) * 4,
        stem_width=128,
    ),
    maxvit_xlarge_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        block_type=('M',) * 4,
        stem_width=192,
    ),
)
class Attention2d(nn.Module):
    """Multi-head self-attention for 2D NCHW tensors.

    q, k, v are produced by a single 1x1 conv; attention is computed over the
    flattened H*W positions and the result reshaped back to NCHW.
    """

    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            dim_head: int = 32,
            bias: bool = True,
            expand_first: bool = True,
            rel_pos_cls: Callable = None,
            attn_drop: float = 0.,
            proj_drop: float = 0.
    ):
        super().__init__()
        dim_out = dim_out or dim
        dim_attn = dim_out if expand_first else dim
        # FIX: guard against silent head truncation from integer division;
        # sibling AttentionCl already asserts this.
        assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
        self.num_heads = dim_attn // dim_head
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
        # Optional relative-position module (e.g. RelPosBias / RelPosMlp).
        self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
        B, C, H, W = x.shape
        # (B, heads, 3*dim_head, H*W) -> three (B, heads, dim_head, H*W)
        q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
        attn = (q.transpose(-2, -1) @ k) * self.scale
        if self.rel_pos is not None:
            attn = self.rel_pos(attn)
        elif shared_rel_pos is not None:
            attn = attn + shared_rel_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class AttentionCl(nn.Module):
    """ Channels-last multi-head attention (B, ..., C) """

    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            dim_head: int = 32,
            bias: bool = True,
            expand_first: bool = True,
            rel_pos_cls: Callable = None,
            attn_drop: float = 0.,
            proj_drop: float = 0.
    ):
        super().__init__()
        dim_out = dim_out or dim
        # Only expand inside attention when it would actually grow the width.
        dim_attn = dim_out if expand_first and dim_out > dim else dim
        assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
        self.num_heads = dim_attn // dim_head
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias)
        # Optional relative-position module (e.g. RelPosBias / RelPosMlp).
        self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim_attn, dim_out, bias=bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
        B = x.shape[0]
        restore_shape = x.shape[:-1]  # leading (B, ...) dims, restored on output
        # (B, N, heads, 3*dim_head) -> (B, heads, N, 3*dim_head) -> 3 x (B, heads, N, dim_head)
        q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if self.rel_pos is not None:
            attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
        elif shared_rel_pos is not None:
            attn = attn + shared_rel_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(restore_shape + (-1,))
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class LayerScale(nn.Module):
    """Learnable per-channel scaling for channels-last tensors."""

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(torch.full((dim,), float(init_values)))

    def forward(self, x):
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma
class LayerScale2d(nn.Module):
    """Learnable per-channel scaling for NCHW tensors."""

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(torch.full((dim,), float(init_values)))

    def forward(self, x):
        g = self.gamma.view(1, -1, 1, 1)  # broadcast over N, H, W
        if self.inplace:
            return x.mul_(g)
        return x * g
class Downsample2d(nn.Module):
    """2x spatial downsample with optional 1x1 channel expansion.

    Supported pool modes:
    * 'max'  - MaxPool2d, kernel 3, stride 2, padding 1
    * 'max2' - MaxPool2d, kernel = stride = 2
    * 'avg'  - AvgPool2d, kernel 3, stride 2, padding 1 (pad excluded from avg)
    * 'avg2' - AvgPool2d, kernel = stride = 2
    """

    def __init__(
            self,
            dim: int,
            dim_out: int,
            pool_type: str = 'avg2',
            bias: bool = True,
    ):
        super().__init__()
        pool_factories = {
            'max': lambda: nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            'max2': lambda: nn.MaxPool2d(2),  # kernel_size == stride == 2
            'avg': lambda: nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False),
            'avg2': lambda: nn.AvgPool2d(2),  # kernel_size == stride == 2
        }
        assert pool_type in pool_factories
        self.pool = pool_factories[pool_type]()
        # 1x1 conv only when the channel count actually changes.
        self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) if dim != dim_out else nn.Identity()

    def forward(self, x):
        # spatial downsample, then channel expansion
        return self.expand(self.pool(x))
def _init_transformer(module, name, scheme=''):
if isinstance(module, (nn.Conv2d, nn.Linear)):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# vit like
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
class TransformerBlock2d(nn.Module):
    """ Transformer block with 2D downsampling
    '2D' NCHW tensor layout

    Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW
    for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs.

    This impl was faster on TPU w/ PT XLA than the 1D experiment.
    """

    def __init__(
            self,
            dim: int,
            dim_out: int,
            stride: int = 1,
            rel_pos_cls: Callable = None,
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
        act_layer = get_act_layer(cfg.act_layer)

        if stride == 2:
            # Strided block: shortcut pools + projects channels; the pre-attn
            # norm path norms first, then pools at the original width.
            self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias)
            self.norm1 = nn.Sequential(OrderedDict([
                ('norm', norm_layer(dim)),
                ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)),
            ]))
        else:
            assert dim == dim_out
            self.shortcut = nn.Identity()
            self.norm1 = norm_layer(dim)

        self.attn = Attention2d(
            dim,
            dim_out,
            dim_head=cfg.dim_head,
            expand_first=cfg.expand_first,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop
        )
        # LayerScale only when init_values is configured; DropPath only when > 0.
        self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim_out)
        self.mlp = ConvMlp(
            in_features=dim_out,
            hidden_features=int(dim_out * cfg.expand_ratio),
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def init_weights(self, scheme=''):
        named_apply(partial(_init_transformer, scheme=scheme), self)

    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
        # Pre-norm residual attention, then pre-norm residual MLP.
        x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
def _init_conv(module, name, scheme=''):
if isinstance(module, nn.Conv2d):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# efficientnet like
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
nn.init.zeros_(module.bias)
def num_groups(group_size, channels):
    """Map a group-size spec to a conv `groups` count.

    group_size of 0/None means a normal dense conv (1 group);
    group_size == 1 means depthwise (groups == channels).
    """
    if not group_size:  # 0 or None
        return 1
    # NOTE group_size == 1 -> depthwise conv
    assert channels % group_size == 0
    return channels // group_size
class MbConvBlock(nn.Module):
    """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand)
    """

    def __init__(
            self,
            in_chs: int,
            out_chs: int,
            stride: int = 1,
            dilation: Tuple[int, int] = (1, 1),
            cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            drop_path: float = 0.
    ):
        super(MbConvBlock, self).__init__()
        norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps)
        # expansion width computed from output or input chs per cfg.expand_output
        mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio)
        groups = num_groups(cfg.group_size, mid_chs)

        if stride == 2:
            self.shortcut = Downsample2d(in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias)
        else:
            self.shortcut = nn.Identity()

        # Exactly one of pool / 1x1 / depthwise conv carries the stride.
        assert cfg.stride_mode in ('pool', '1x1', 'dw')
        stride_pool, stride_1, stride_2 = 1, 1, 1
        if cfg.stride_mode == 'pool':
            # NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1
            stride_pool, dilation_2 = stride, dilation[1]
            # FIXME handle dilation of avg pool
        elif cfg.stride_mode == '1x1':
            # NOTE I don't like this option described in paper, 1x1 w/ stride throws info away
            stride_1, dilation_2 = stride, dilation[1]
        else:
            # NOTE(review): this branch uses dilation[0] where the others use
            # dilation[1] — looks deliberate for 'dw' striding but confirm.
            stride_2, dilation_2 = stride, dilation[0]

        self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act)
        if stride_pool > 1:
            self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
        else:
            self.down = nn.Identity()
        self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1)
        self.norm1 = norm_act_layer(mid_chs)
        self.conv2_kxk = create_conv2d(
            mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups)

        attn_kwargs = {}
        if isinstance(cfg.attn_layer, str):
            if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca':
                attn_kwargs['act_layer'] = cfg.attn_act_layer
                attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs))

        # two different orderings for SE and norm2 (due to some weights and trials using SE before norm2)
        if cfg.attn_early:
            self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
            self.norm2 = norm_act_layer(mid_chs)
            self.se = None
        else:
            self.se_early = None
            self.norm2 = norm_act_layer(mid_chs)
            self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)

        self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def init_weights(self, scheme=''):
        named_apply(partial(_init_conv, scheme=scheme), self)

    def forward(self, x):
        shortcut = self.shortcut(x)
        x = self.pre_norm(x)
        x = self.down(x)

        # 1x1 expansion conv & norm-act
        x = self.conv1_1x1(x)
        x = self.norm1(x)

        # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act
        x = self.conv2_kxk(x)
        if self.se_early is not None:
            x = self.se_early(x)
        x = self.norm2(x)
        if self.se is not None:
            x = self.se(x)

        # 1x1 linear projection to output width
        x = self.conv3_1x1(x)
        x = self.drop_path(x) + shortcut
        return x
class ConvNeXtBlock(nn.Module):
    """ ConvNeXt Block

    Depthwise conv -> norm -> MLP with a residual shortcut. Supports either an
    NCHW conv-MLP path (``conv_mlp=True``) or an NHWC ``Mlp`` path that is
    reached by permuting around the norm/MLP in ``forward``.
    """

    def __init__(
            self,
            in_chs: int,
            out_chs: Optional[int] = None,  # defaults to in_chs when None
            kernel_size: int = 7,
            stride: int = 1,
            dilation: Tuple[int, int] = (1, 1),
            cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            conv_mlp: bool = True,  # True: NCHW ConvMlp; False: NHWC Mlp w/ permutes
            drop_path: float = 0.
    ):
        super().__init__()
        out_chs = out_chs or in_chs
        act_layer = get_act_layer(cfg.act_layer)
        if conv_mlp:
            # NCHW path: norm and MLP operate directly on (B, C, H, W) tensors.
            norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
            mlp_layer = ConvMlp
        else:
            # NHWC path requires a LayerNorm-style norm (applied channels-last).
            assert 'layernorm' in cfg.norm_layer
            norm_layer = LayerNorm
            mlp_layer = Mlp
        self.use_conv_mlp = conv_mlp

        # Residual shortcut: pooled downsample when striding, 1x1 projection on
        # channel change, identity otherwise.
        if stride == 2:
            self.shortcut = Downsample2d(in_chs, out_chs)
        elif in_chs != out_chs:
            self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
        else:
            self.shortcut = nn.Identity()

        # Stride is realized either by an avg-pool downsample or by the dw conv itself.
        assert cfg.stride_mode in ('pool', 'dw')
        stride_pool, stride_dw = 1, 1
        # FIXME handle dilation?
        if cfg.stride_mode == 'pool':
            stride_pool = stride
        else:
            stride_dw = stride

        if stride_pool == 2:
            self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
        else:
            self.down = nn.Identity()

        self.conv_dw = create_conv2d(
            in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1],
            depthwise=True, bias=cfg.output_bias)
        self.norm = norm_layer(out_chs)
        self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer)
        # Layer-scale variant must match the tensor layout of the MLP path.
        if conv_mlp:
            self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
        else:
            self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        shortcut = self.shortcut(x)
        x = self.down(x)
        x = self.conv_dw(x)
        if self.use_conv_mlp:
            # NCHW: norm / MLP / layer-scale applied directly.
            x = self.norm(x)
            x = self.mlp(x)
            x = self.ls(x)
        else:
            # NHWC: permute to channels-last for LayerNorm/Mlp, then back to NCHW.
            x = x.permute(0, 2, 3, 1)
            x = self.norm(x)
            x = self.mlp(x)
            x = self.ls(x)
            x = x.permute(0, 3, 1, 2)
        x = self.drop_path(x) + shortcut
        return x
def window_partition(x, window_size: List[int]):
    """Partition an NHWC tensor into non-overlapping windows.

    Args:
        x: Input tensor of shape (B, H, W, C).
        window_size: (window_h, window_w); H and W must be divisible by it.

    Returns:
        Tensor of shape (B * num_windows, window_h, window_w, C).
    """
    B, H, W, C = x.shape
    _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    # FIX: the width check previously raised with an empty message; give it an
    # informative one matching the height check.
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
    x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def window_reverse(windows, window_size: List[int], img_size: List[int]):
    """Inverse of window_partition: reassemble windows into a (B, H, W, C) tensor."""
    H, W = img_size
    win_h, win_w = window_size
    C = windows.shape[-1]
    out = windows.view(-1, H // win_h, W // win_w, win_h, win_w, C)
    out = out.permute(0, 1, 3, 2, 4, 5).contiguous()
    return out.view(-1, H, W, C)
def grid_partition(x, grid_size: List[int]):
    """Partition an NHWC tensor into a strided 'grid' of windows (MaxViT grid attn).

    Args:
        x: Input tensor of shape (B, H, W, C).
        grid_size: (grid_h, grid_w); H and W must be divisible by it.

    Returns:
        Tensor of shape (B * num_cells, grid_h, grid_w, C).
    """
    B, H, W, C = x.shape
    _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    # FIX: the width check previously raised with an empty message; give it an
    # informative one matching the height check.
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
    x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def grid_reverse(windows, grid_size: List[int], img_size: List[int]):
    """Inverse of grid_partition: reassemble grid windows into a (B, H, W, C) tensor."""
    H, W = img_size
    gh, gw = grid_size
    C = windows.shape[-1]
    out = windows.view(-1, H // gh, W // gw, gh, gw, C)
    out = out.permute(0, 3, 1, 4, 2, 5).contiguous()
    return out.view(-1, H, W, C)
def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size):
    """Map cfg.rel_pos_type to a relative-position module factory, or None if unset."""
    if cfg.rel_pos_type == 'mlp':
        return partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim)
    if cfg.rel_pos_type == 'bias':
        return partial(RelPosBias, window_size=window_size)
    return None
class PartitionAttentionCl(nn.Module):
    """ Grid or Block partition + Attn + FFN.
    NxC 'channels last' tensor layout.

    Pre-norm transformer block where attention is computed within either
    'block' (window) or grid partitions of the spatial dims.
    """

    def __init__(
            self,
            dim: int,
            partition_type: str = 'block',  # 'block' = window attn, otherwise grid attn
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps)  # NOTE this block is channels-last
        act_layer = get_act_layer(cfg.act_layer)

        # 'block' attends within local windows, otherwise within a strided grid.
        self.partition_block = partition_type == 'block'
        self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)

        # Attention sub-block (pre-norm, optional layer-scale and drop-path).
        self.norm1 = norm_layer(dim)
        self.attn = AttentionCl(
            dim,
            dim,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # FFN sub-block.
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * cfg.expand_ratio),
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def _partition_attn(self, x):
        # Partition NHWC input, attend within each partition, then un-partition.
        img_size = x.shape[1:3]
        if self.partition_block:
            partitioned = window_partition(x, self.partition_size)
        else:
            partitioned = grid_partition(x, self.partition_size)

        partitioned = self.attn(partitioned)

        if self.partition_block:
            x = window_reverse(partitioned, self.partition_size, img_size)
        else:
            x = grid_reverse(partitioned, self.partition_size, img_size)
        return x

    def forward(self, x):
        # Pre-norm residual: x + attn, then x + mlp.
        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
class ParallelPartitionAttention(nn.Module):
    """ Experimental. Grid and Block partition + single FFN
    NxC tensor layout.

    Runs window ('block') attention and grid attention in parallel, each
    producing half the channels; results are concatenated and fed through one
    shared FFN.
    """

    def __init__(
            self,
            dim: int,
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        # Channels are split evenly between the two attention branches.
        assert dim % 2 == 0
        norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps)  # NOTE this block is channels-last
        act_layer = get_act_layer(cfg.act_layer)

        # Both branches must share a single partition size.
        assert cfg.window_size == cfg.grid_size
        self.partition_size = to_2tuple(cfg.window_size)
        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)

        self.norm1 = norm_layer(dim)
        # Window (local) attention branch -> dim // 2 output channels.
        self.attn_block = AttentionCl(
            dim,
            dim // 2,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        # Grid attention branch -> the other dim // 2 output channels.
        self.attn_grid = AttentionCl(
            dim,
            dim // 2,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # Single FFN applied to the concatenated branch outputs.
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * cfg.expand_ratio),
            out_features=dim,
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def _partition_attn(self, x):
        img_size = x.shape[1:3]

        # Window (local) attention on half the channels.
        partitioned_block = window_partition(x, self.partition_size)
        partitioned_block = self.attn_block(partitioned_block)
        x_window = window_reverse(partitioned_block, self.partition_size, img_size)

        # Grid attention on the other half.
        partitioned_grid = grid_partition(x, self.partition_size)
        partitioned_grid = self.attn_grid(partitioned_grid)
        x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size)

        # Concatenate the halves back to the full channel dim.
        return torch.cat([x_window, x_grid], dim=-1)

    def forward(self, x):
        # Pre-norm residual: x + parallel-attn, then x + mlp.
        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
def window_partition_nchw(x, window_size: List[int]):
    """Partition an NCHW tensor into non-overlapping windows.

    Args:
        x: Input tensor of shape (B, C, H, W).
        window_size: (window_h, window_w); H and W must be divisible by it.

    Returns:
        Tensor of shape (B * num_windows, C, window_h, window_w).
    """
    B, C, H, W = x.shape
    _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    # FIX: the width check previously raised with an empty message; give it an
    # informative one matching the height check.
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
    x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1])
    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1])
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]):
    """Inverse of window_partition_nchw: reassemble windows into a (B, C, H, W) tensor."""
    H, W = img_size
    win_h, win_w = window_size
    C = windows.shape[1]
    out = windows.view(-1, H // win_h, W // win_w, C, win_h, win_w)
    out = out.permute(0, 3, 1, 4, 2, 5).contiguous()
    return out.view(-1, C, H, W)
def grid_partition_nchw(x, grid_size: List[int]):
    """Partition an NCHW tensor into a strided 'grid' of windows (MaxViT grid attn).

    Args:
        x: Input tensor of shape (B, C, H, W).
        grid_size: (grid_h, grid_w); H and W must be divisible by it.

    Returns:
        Tensor of shape (B * num_cells, C, grid_h, grid_w).
    """
    B, C, H, W = x.shape
    _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    # FIX: the width check previously raised with an empty message; give it an
    # informative one matching the height check.
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
    x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1])
    windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1])
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]):
    """Inverse of grid_partition_nchw: reassemble grid windows into a (B, C, H, W) tensor."""
    H, W = img_size
    gh, gw = grid_size
    C = windows.shape[1]
    out = windows.view(-1, H // gh, W // gw, C, gh, gw)
    out = out.permute(0, 3, 4, 1, 5, 2).contiguous()
    return out.view(-1, C, H, W)
class PartitionAttention2d(nn.Module):
    """ Grid or Block partition + Attn + FFN
    '2D' NCHW tensor layout.

    NCHW counterpart of PartitionAttentionCl; uses 2d attention and a conv MLP
    so no layout permutes are needed in forward().
    """

    def __init__(
            self,
            dim: int,
            partition_type: str = 'block',  # 'block' = window attn, otherwise grid attn
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)  # NOTE this variant operates on NCHW tensors (uses cfg.norm_layer, not norm_layer_cl)
        act_layer = get_act_layer(cfg.act_layer)

        # 'block' attends within local windows, otherwise within a strided grid.
        self.partition_block = partition_type == 'block'
        self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)

        # Attention sub-block (pre-norm, optional layer-scale and drop-path).
        self.norm1 = norm_layer(dim)
        self.attn = Attention2d(
            dim,
            dim,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # FFN sub-block (conv MLP, stays NCHW).
        self.norm2 = norm_layer(dim)
        self.mlp = ConvMlp(
            in_features=dim,
            hidden_features=int(dim * cfg.expand_ratio),
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def _partition_attn(self, x):
        # Partition NCHW input, attend within each partition, then un-partition.
        img_size = x.shape[-2:]
        if self.partition_block:
            partitioned = window_partition_nchw(x, self.partition_size)
        else:
            partitioned = grid_partition_nchw(x, self.partition_size)

        partitioned = self.attn(partitioned)

        if self.partition_block:
            x = window_reverse_nchw(partitioned, self.partition_size, img_size)
        else:
            x = grid_reverse_nchw(partitioned, self.partition_size, img_size)
        return x

    def forward(self, x):
        # Pre-norm residual: x + attn, then x + mlp.
        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
class MaxxVitBlock(nn.Module):
    """ MaxVit conv, window partition + FFN , grid partition + FFN

    One MaxViT block: a conv sub-block (MBConv or ConvNeXt) followed by window
    ('block') partition attention and grid partition attention, each with its
    own FFN.
    """

    def __init__(
            self,
            dim: int,
            dim_out: int,
            stride: int = 1,
            conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            use_nchw_attn: bool = False,  # FIXME move to cfg? True is ~20-30% faster on TPU, 5-10% slower on GPU
            drop_path: float = 0.,
    ):
        super().__init__()
        self.nchw_attn = use_nchw_attn

        # Conv sub-block handles any stride / channel change.
        if conv_cfg.block_type == 'convnext':
            self.conv = ConvNeXtBlock(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
        else:
            self.conv = MbConvBlock(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)

        # Attention sub-blocks; NCHW variant avoids layout permutes in forward().
        partition_layer = PartitionAttention2d if use_nchw_attn else PartitionAttentionCl
        common_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
        self.attn_block = partition_layer(**common_kwargs)
        self.attn_grid = partition_layer(partition_type='grid', **common_kwargs)

    def init_weights(self, scheme=''):
        """Apply scheme-specific init to the transformer and conv sub-modules."""
        for attn_mod in (self.attn_block, self.attn_grid):
            named_apply(partial(_init_transformer, scheme=scheme), attn_mod)
        named_apply(partial(_init_conv, scheme=scheme), self.conv)

    def forward(self, x):
        # Input / output are NCHW.
        x = self.conv(x)
        needs_permute = not self.nchw_attn
        if needs_permute:
            x = x.permute(0, 2, 3, 1)  # to NHWC (channels-last) for the Cl attention blocks
        x = self.attn_grid(self.attn_block(x))
        if needs_permute:
            x = x.permute(0, 3, 1, 2)  # back to NCHW
        return x
class ParallelMaxxVitBlock(nn.Module):
    """ MaxVit block with parallel cat(window + grid), one FF
    Experimental timm block.

    A stack of ``num_conv`` conv sub-blocks (MBConv or ConvNeXt) followed by a
    ParallelPartitionAttention (window + grid attention in parallel, one FFN).
    """

    def __init__(
            self,
            dim,
            dim_out,
            stride=1,
            num_conv=2,  # number of stacked conv sub-blocks before attention
            conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path=0.,
    ):
        super().__init__()
        conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
        if num_conv > 1:
            # First conv handles stride / channel change, the rest are dim_out -> dim_out.
            convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
            # FIX: the original used list multiplication ([module] * (num_conv - 1)),
            # which repeats the *same* module instance and silently ties weights
            # across repeats for num_conv > 2. Construct a distinct module per
            # repeat instead (identical behavior for the default num_conv=2).
            convs += [
                conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)
                for _ in range(num_conv - 1)
            ]
            self.conv = nn.Sequential(*convs)
        else:
            self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
        self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)

    def init_weights(self, scheme=''):
        """Apply scheme-specific init to the attention and conv sub-modules."""
        named_apply(partial(_init_transformer, scheme=scheme), self.attn)
        named_apply(partial(_init_conv, scheme=scheme), self.conv)

    def forward(self, x):
        # NCHW in/out; the parallel attention runs channels-last.
        x = self.conv(x)
        x = x.permute(0, 2, 3, 1)
        x = self.attn(x)
        x = x.permute(0, 3, 1, 2)
        return x
class MaxxVitStage(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 2,
depth: int = 4,
feat_size: Tuple[int, int] = (14, 14),
block_types: Union[str, Tuple[str]] = 'C',
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: Union[float, List[float]] = 0.,
):
super().__init__()
self.grad_checkpointing = False
block_types = extend_tuple(block_types, depth)
blocks = []
for i, t in enumerate(block_types):
block_stride = stride if i == 0 else 1
assert t in ('C', 'T', 'M', 'PM')
if t == 'C':
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
blocks += [conv_cls(
in_chs,
out_chs,
stride=block_stride,
cfg=conv_cfg,
drop_path=drop_path[i],
)]
elif t == 'T':
rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size)
blocks += [TransformerBlock2d(
in_chs,
out_chs,
stride=block_stride,
rel_pos_cls=rel_pos_cls,
cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'M':
blocks += [MaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'PM':
blocks += [ParallelMaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting(): | x = checkpoint_seq(self.blocks, x) | 1 | 2023-10-24 17:49:10+00:00 | 12k |
StackTipsLab/bloggy | bloggy/urls.py | [
{
"identifier": "settings",
"path": "bloggy/settings.py",
"snippet": "BASE_DIR = Path(__file__).resolve().parent.parent\nSECRET_KEY = os.getenv(\"SECRET_KEY\", get_random_secret_key())\nDEBUG = os.getenv(\"DEBUG\", \"False\") == \"True\"\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1, localho... | from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LogoutView
from django.contrib.auth.views import PasswordChangeView
from django.contrib.sitemaps.views import sitemap, index
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from django.views.generic.base import TemplateView
from bloggy import settings
from bloggy.views import EditProfileView
from bloggy.views.courses_view import CoursesListView, CourseDetailsView, LessonDetailsView
from bloggy.views.pages import IndexView
from bloggy.views.category_view import CategoriesView, CategoryDetailsView
from .services.sitemaps import sitemaps_list
from .views import RegisterView
from .views.account import AccountActivationView
from .views.posts import PostListView, PostDetailsView
from .views.login import MyLoginView
from .views.pages import AdsTextView, robots
from .views.pages import PageDetailsView
from .views.quizzes_view import QuizListView, QuizDetailView
from .views.rss import PostsRssFeed, CoursesRssFeed
from .views.search import SearchListView
from .views.user import MyProfileView, PublicProfileView, AuthorsListView
from .views.user_collections import UserBookmarksView | 8,311 | """bloggy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'),
path('', IndexView.as_view(), name='index'),
path('articles', PostListView.as_view(), name='posts'),
path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'),
path('topics', CategoriesView.as_view(), name='categories'),
path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'),
path('search', SearchListView.as_view(), name='search'),
path('courses', CoursesListView.as_view(), name='courses'),
path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'),
path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'),
path('quizzes', QuizListView.as_view(), name='quizzes'),
path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'),
path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'),
path('authors', AuthorsListView.as_view(), name="authors"),
path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"),
path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"),
# path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"),
path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"),
path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'),
path("rss/articles", PostsRssFeed(), name="articles_feed"),
path("rss/courses", CoursesRssFeed(), name="courses_feed"),
path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'),
path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list},
name='django.contrib.sitemaps.views.sitemap'),
# static files for SEO or other reasons
path('robots.txt', robots, name='robots'),
| """bloggy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'),
path('', IndexView.as_view(), name='index'),
path('articles', PostListView.as_view(), name='posts'),
path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'),
path('topics', CategoriesView.as_view(), name='categories'),
path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'),
path('search', SearchListView.as_view(), name='search'),
path('courses', CoursesListView.as_view(), name='courses'),
path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'),
path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'),
path('quizzes', QuizListView.as_view(), name='quizzes'),
path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'),
path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'),
path('authors', AuthorsListView.as_view(), name="authors"),
path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"),
path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"),
# path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"),
path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"),
path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'),
path("rss/articles", PostsRssFeed(), name="articles_feed"),
path("rss/courses", CoursesRssFeed(), name="courses_feed"),
path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'),
path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list},
name='django.contrib.sitemaps.views.sitemap'),
# static files for SEO or other reasons
path('robots.txt', robots, name='robots'), | path('ads.txt', AdsTextView.as_view(), name='ads_txt'), | 14 | 2023-10-17 14:50:39+00:00 | 12k |
zabbix/python-zabbix-utils | .github/scripts/compatibility_api_test_6.py | [
{
"identifier": "Getter",
"path": "zabbix_utils/getter.py",
"snippet": "class Getter():\n \"\"\"Zabbix get implementation.\n\n Args:\n host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`.\n\n port (int, optional): Zabbix agent port. Defaults to `10050`.\n\n t... | import sys
import time
import unittest
from zabbix_utils.getter import Getter
from zabbix_utils.exceptions import APIRequestError
from zabbix_utils.api import ZabbixAPI, APIVersion
from zabbix_utils.sender import ItemValue, Sender, TrapperResponse | 8,658 | #!/usr/bin/env python
# Copyright (C) 2001-2023 Zabbix SIA
#
# Zabbix SIA licenses this file under the MIT License.
# See the LICENSE file in the project root for more information.
sys.path.append('.')
ZABBIX_URL = 'localhost'
ZABBIX_USER = 'Admin'
ZABBIX_PASSWORD = 'zabbix'
class CompatibilityAPITest(unittest.TestCase):
"""Compatibility test with Zabbix API version 6.0"""
def setUp(self):
self.url = 'localhost'
self.user = 'Admin'
self.password = 'zabbix'
self.token_id = None
self.token = None
self.zapi = ZabbixAPI(
url=self.url
)
self._create_token()
def _create_token(self):
"""Tests auth using username and password"""
self.assertEqual(
type(self.zapi), ZabbixAPI, "Creating ZabbixAPI object was going wrong")
self.assertEqual(
type(self.zapi.api_version()), APIVersion, "Version getting was going wrong")
self.zapi.login(
user=self.user,
password=self.password
)
self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login was going wrong")
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
self.assertEqual(
type(resp), dict, "Request user.checkAuthentication was going wrong")
tokens = self.zapi.token.get(
filter={'name': f"{self.user} [{self.__class__.__name__}]"},
output=['tokenid']
)
if tokens:
self.token_id = int(tokens[0]['tokenid'])
self.assertEqual(
type(self.token_id), int, "Request token.get was going wrong")
else:
self.token_id = int(self.zapi.token.create(
name=f"{self.user} [{self.__class__.__name__}]"
)['tokenids'][0])
self.assertEqual(
type(self.token_id), int, "Request token.create was going wrong")
self.token = self.zapi.token.generate(*[self.token_id])[0]['token']
self.assertEqual(type(self.token), str, "Request token.generate was going wrong")
self.zapi.logout()
self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
| #!/usr/bin/env python
# Copyright (C) 2001-2023 Zabbix SIA
#
# Zabbix SIA licenses this file under the MIT License.
# See the LICENSE file in the project root for more information.
sys.path.append('.')
ZABBIX_URL = 'localhost'
ZABBIX_USER = 'Admin'
ZABBIX_PASSWORD = 'zabbix'
class CompatibilityAPITest(unittest.TestCase):
"""Compatibility test with Zabbix API version 6.0"""
def setUp(self):
self.url = 'localhost'
self.user = 'Admin'
self.password = 'zabbix'
self.token_id = None
self.token = None
self.zapi = ZabbixAPI(
url=self.url
)
self._create_token()
def _create_token(self):
"""Tests auth using username and password"""
self.assertEqual(
type(self.zapi), ZabbixAPI, "Creating ZabbixAPI object was going wrong")
self.assertEqual(
type(self.zapi.api_version()), APIVersion, "Version getting was going wrong")
self.zapi.login(
user=self.user,
password=self.password
)
self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login was going wrong")
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
self.assertEqual(
type(resp), dict, "Request user.checkAuthentication was going wrong")
tokens = self.zapi.token.get(
filter={'name': f"{self.user} [{self.__class__.__name__}]"},
output=['tokenid']
)
if tokens:
self.token_id = int(tokens[0]['tokenid'])
self.assertEqual(
type(self.token_id), int, "Request token.get was going wrong")
else:
self.token_id = int(self.zapi.token.create(
name=f"{self.user} [{self.__class__.__name__}]"
)['tokenids'][0])
self.assertEqual(
type(self.token_id), int, "Request token.create was going wrong")
self.token = self.zapi.token.generate(*[self.token_id])[0]['token']
self.assertEqual(type(self.token), str, "Request token.generate was going wrong")
self.zapi.logout()
self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
| with self.assertRaises(APIRequestError, | 1 | 2023-10-16 12:49:35+00:00 | 12k |
YefanZhou/TempBalance | main_tb.py | [
{
"identifier": "Tempbalance",
"path": "tempbalance.py",
"snippet": "class Tempbalance(object):\n def __init__(self, \n net, \n EVALS_THRESH=0.00001,\n bins=100, \n conv_norm=0.5,\n pl_fitting='median',\n ... | import os
import sys
import time
import argparse
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import config as cf
import torch_optimizer
from pathlib import Path
from os.path import join
from tempbalance import Tempbalance
from sgdsnr import SGDSNR
from adamp import SGDP, AdamP
from lars_optim import LARS, LAMB
from utils import train, test, getNetwork, save_args_to_file | 10,394 | transform=transform_train)
testset = torchvision.datasets.SVHN(root=data_path,
split='test',
download=True,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'tiny-imagenet-200'):
print("| Preparing tiny-imagenet-200 dataset...")
sys.stdout.write("| ")
trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test)
num_classes = 200
else:
raise NotImplementedError
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
num_workers=6)
testloader = torch.utils.data.DataLoader(testset,
batch_size=cf.eval_batchsize[args.dataset],
shuffle=False,
num_workers=4)
Path(args.ckpt_path).mkdir(parents=True, exist_ok=True)
if args.print_tofile:
# Open files for stdout and stderr redirection
stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w')
stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w')
# Redirect stdout and stderr to the files
sys.stdout = stdout_file
sys.stderr = stderr_file
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
net, file_name = getNetwork(args, num_classes)
checkpoint = torch.load(args.resume, map_location='cpu')
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['test_acc']
start_epoch = checkpoint['epoch']
print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}")
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args, num_classes)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
best_acc = 0
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
print(net)
if args.use_tb:
print("##############Enable and init Temp Balancing##################")
tb_scheduler = Tempbalance(net=net,
pl_fitting=args.pl_fitting,
xmin_pos=args.xmin_pos,
filter_zeros=args.filter_zeros,
remove_first_layer=args.remove_first_layer,
remove_last_layer=args.remove_last_layer,
esd_metric_for_tb=args.esd_metric_for_tb,
assign_func=args.assign_func,
lr_min_ratio=args.lr_min_ratio,
lr_max_ratio=args.lr_max_ratio,
batchnorm=args.batchnorm,
batchnorm_type=args.batchnorm_type
)
tb_param_group, _ = \
tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True)
if args.optim_type == 'SGD':
optimizer = optim.SGD(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR':
optimizer = SGDSNR(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay,
spectrum_regularization=args.sg)
elif args.optim_type == 'SGDP':
optimizer = SGDP(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
else:
raise NotImplementedError
else:
print('Disable Temp Balancing')
if args.optim_type == 'SGD':
optimizer = optim.SGD(net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR':
optimizer = SGDSNR(net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay,
spectrum_regularization=args.sg)
elif args.optim_type == 'SGDP':
optimizer = SGDP( net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'AdamP':
optimizer = AdamP( net.parameters(),
lr=args.lr,
betas=(0.9, 0.999),
weight_decay=args.weight_decay)
elif args.optim_type == 'LARS':
| from __future__ import print_function
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', type=float, default=0.01, help='learning_rate')
parser.add_argument('--net-type', type=str, default='wide-resnet', help='model')
parser.add_argument('--depth', type=int, default=28, help='depth of model')
parser.add_argument('--num-epochs', type=int, default=200, help='number of epochs')
parser.add_argument('--widen-factor', type=float, default=1, help='width of model')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset = [cifar10/cifar100]')
parser.add_argument('--lr-sche', type=str, default='cosine', choices=['cosine'])
parser.add_argument('--weight-decay', type=float, default=1e-4) # 5e-4
parser.add_argument('--ckpt-path', type=str, default='', help='path to checkpoints')
parser.add_argument('--print-tofile', default=False, type=lambda x: (str(x).lower() == 'true'), help='print to file')
parser.add_argument('--batch-size', type=int, default=128) # 5e-4
parser.add_argument('--datadir', type=str, default='', help='directory of dataset')
parser.add_argument('--optim-type', type=str, default='SGD', help='type of optimizer')
parser.add_argument('--resume', type=str, default='', help='resume from checkpoint')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--ww-interval', type=int, default=1)
parser.add_argument('--epochs-to-save', type=int, nargs='+', default=[])
parser.add_argument('--pl-fitting', type=str, default='median', choices=['median', 'goodness-of-fit', 'fix-finger'])
# temperature balance related
parser.add_argument('--use-tb', default=True, type=lambda x: (str(x).lower() == 'true'), help='use temp balance')
parser.add_argument('--remove-last-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the last layer')
parser.add_argument('--remove-first-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the first layer')
parser.add_argument('--batchnorm', default=True, type=lambda x: (str(x).lower() == 'true'), help='balancing batch norm layer')
parser.add_argument('--filter-zeros', default=False, type=lambda x: (str(x).lower() == 'true') )
parser.add_argument('--esd-metric-for-tb', type=str, default='alpha', help='ww metric')
parser.add_argument('--assign-func', type=str, default='', help='assignment function for layerwise lr')
parser.add_argument('--lr-min-ratio', type=float, default=0.5)
parser.add_argument('--lr-max-ratio', type=float, default=1.5)
parser.add_argument('--xmin-pos', type=float, default=2, help='xmin_index = size of eigs // xmin_pos')
parser.add_argument('--batchnorm-type', type=str, default='name', help='method to change batchnorm layer learning rate')
parser.add_argument('--look-k', type=int, default=5, help='')
parser.add_argument('--look-alpha', type=float, default=0.8, help='')
parser.add_argument('--T_0', type=int, default=10, help='')
parser.add_argument('--T-mult', type=int, default=2, help='')
# spectral regularization related
parser.add_argument('--sg', type=float, default=0.01, help='spectrum regularization')
args = parser.parse_args()
print(args)
# Save the arguments to a file
save_args_to_file(args, join(args.ckpt_path, 'args.json'))
def set_seed(seed=42):
    """Make every RNG source (numpy, random, torch CPU/CUDA) deterministic for `seed`."""
    print(f"=====> Set the random seed as {seed}")
    for seeder in (np.random.seed, random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    # cuDNN must also be forced into its deterministic, non-benchmarking mode.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Pin Python's hash randomization as well.
    os.environ["PYTHONHASHSEED"] = str(seed)
# Hyper Parameter settings
use_cuda = torch.cuda.is_available()
best_acc = 0
start_epoch = cf.start_epoch
set_seed(args.seed)
# Data Loader
print('\n[Phase 1] : Data Preparation')
print(f"prepare preprocessing, {args.dataset}")
transform_train = transforms.Compose([
transforms.RandomCrop(cf.crop_size[args.dataset], padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
data_path = join(args.datadir, args.dataset)
if(args.dataset == 'cifar10'):
print("| Preparing CIFAR-10 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'cifar100'):
print("| Preparing CIFAR-100 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR100(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR100(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 100
elif(args.dataset == 'svhn'):
print("| Preparing SVHN dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.SVHN(root=data_path,
split='train',
download=True,
transform=transform_train)
testset = torchvision.datasets.SVHN(root=data_path,
split='test',
download=True,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'tiny-imagenet-200'):
print("| Preparing tiny-imagenet-200 dataset...")
sys.stdout.write("| ")
trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test)
num_classes = 200
else:
raise NotImplementedError
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
num_workers=6)
testloader = torch.utils.data.DataLoader(testset,
batch_size=cf.eval_batchsize[args.dataset],
shuffle=False,
num_workers=4)
Path(args.ckpt_path).mkdir(parents=True, exist_ok=True)
if args.print_tofile:
# Open files for stdout and stderr redirection
stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w')
stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w')
# Redirect stdout and stderr to the files
sys.stdout = stdout_file
sys.stderr = stderr_file
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
net, file_name = getNetwork(args, num_classes)
checkpoint = torch.load(args.resume, map_location='cpu')
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['test_acc']
start_epoch = checkpoint['epoch']
print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}")
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args, num_classes)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
best_acc = 0
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
print(net)
if args.use_tb:
print("##############Enable and init Temp Balancing##################")
tb_scheduler = Tempbalance(net=net,
pl_fitting=args.pl_fitting,
xmin_pos=args.xmin_pos,
filter_zeros=args.filter_zeros,
remove_first_layer=args.remove_first_layer,
remove_last_layer=args.remove_last_layer,
esd_metric_for_tb=args.esd_metric_for_tb,
assign_func=args.assign_func,
lr_min_ratio=args.lr_min_ratio,
lr_max_ratio=args.lr_max_ratio,
batchnorm=args.batchnorm,
batchnorm_type=args.batchnorm_type
)
tb_param_group, _ = \
tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True)
if args.optim_type == 'SGD':
optimizer = optim.SGD(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR':
optimizer = SGDSNR(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay,
spectrum_regularization=args.sg)
elif args.optim_type == 'SGDP':
optimizer = SGDP(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
else:
raise NotImplementedError
else:
print('Disable Temp Balancing')
if args.optim_type == 'SGD':
optimizer = optim.SGD(net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR':
optimizer = SGDSNR(net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay,
spectrum_regularization=args.sg)
elif args.optim_type == 'SGDP':
optimizer = SGDP( net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'AdamP':
optimizer = AdamP( net.parameters(),
lr=args.lr,
betas=(0.9, 0.999),
weight_decay=args.weight_decay)
elif args.optim_type == 'LARS': | optimizer = LARS(net.parameters(), | 2 | 2023-10-24 00:45:55+00:00 | 12k |
zhaojw1998/AccoMontage-3 | train_prior.py | [
{
"identifier": "Prior",
"path": "orchestrator/prior_model.py",
"snippet": "class Prior(nn.Module):\n def __init__(self, mixture_encoder=None,\n function_encoder=None,\n context_enc_layer=12, \n function_dec_layer=12, \n ... | import os
import time
import torch
import torch.multiprocessing as mp
from torch import optim
from orchestrator.prior_model import Prior
from orchestrator.prior_dataset import VQ_LMD_Dataset, collate_fn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR
from orchestrator.utils.scheduler import OptimizerSchedulerWithWarmUp
from orchestrator.utils.training import SummaryWriters, LogPathManager, epoch_time
from tqdm import tqdm
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group | 9,880 | return loss_dic
def write_loss_to_dic(writer_names, loss_items):
    """Pair each writer name with the scalar (.item()) value of its loss tensor."""
    assert len(writer_names) == len(loss_items)
    return {name: item.item() for name, item in zip(writer_names, loss_items)}
def init_loss_dic(writer_names):
    """Return a fresh accumulator dict with every tracked loss zeroed."""
    return {name: 0. for name in writer_names}
def average_epoch_loss(epoch_loss_dict, num_batch):
    """Divide every accumulated loss by the batch count, in place, and return the dict."""
    for name in epoch_loss_dict:
        epoch_loss_dict[name] /= num_batch
    return epoch_loss_dict
def batch_report(loss, n_epoch, idx, num_batch, mode='training', verbose=False):
    """Print a one-batch progress line for `mode`; silent unless `verbose`."""
    if not verbose:
        return
    print(f'------------{mode}------------')
    print('Epoch: [{0}][{1}/{2}]'.format(n_epoch, idx, num_batch))
    print(f"\t time func loss: {loss['func_l']:.3f}")
def scheduler_show(optimizer_scheduler, verbose=False):
    """Collect (and optionally print) the wrapped optimizer's current learning rate."""
    params = {'lr': optimizer_scheduler.optimizer.param_groups[0]['lr']}
    if verbose:
        print(params)
    return params
def train(model, dataloader, optimizer_scheduler, writer_names, loss_writers, scheduler_writers, n_epoch, VERBOSE):
    """Run one training epoch; return the batch-averaged loss dict keyed by writer_names."""
    model.train()
    epoch_loss_dic = init_loss_dic(writer_names)
    for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
        try:
            optimizer_scheduler.optimizer_zero_grad()
            # Model computes its own loss when called with the 'loss' mode token.
            loss = model('loss', *batch)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), optimizer_scheduler.clip)
            optimizer_scheduler.step()
            epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss])
            batch_loss_dic = write_loss_to_dic(writer_names, [loss])
            # Global step across epochs, used as the tensorboard x-axis.
            train_step = n_epoch * len(dataloader) + idx
            # Writers are None on non-zero DDP ranks; only rank 0 logs (see main()).
            if loss_writers is not None:
                loss_writers.write_task('train', batch_loss_dic, train_step)
            batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='train', verbose=VERBOSE)
            scheduler_dic = scheduler_show(optimizer_scheduler, verbose=VERBOSE)
            if scheduler_writers is not None:
                scheduler_writers.write_task('train', scheduler_dic, train_step)
        except Exception as exc:
            # Best-effort training: report the failing batch's shapes and keep going.
            print(exc)
            print(batch[0].shape, batch[1].shape)
            continue
    scheduler_show(optimizer_scheduler, verbose=True)
    epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader))
    return epoch_loss_dic
def val(model, dataloader, writer_names, summary_writers, n_epoch, VERBOSE):
    """Run one validation epoch (no grad); return the batch-averaged loss dict."""
    model.eval()
    epoch_loss_dic = init_loss_dic(writer_names)
    for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
        try:
            with torch.no_grad():
                loss = model('loss', *batch)#, **input_params)
            epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss])
            batch_loss_dic = write_loss_to_dic(writer_names, [loss])
            # summary_writers is None on non-zero DDP ranks; only rank 0 reports.
            if summary_writers is not None:
                batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='validation', verbose=VERBOSE)
        except Exception as exc:
            # Best-effort validation: report the failing batch's shapes and keep going.
            print(exc)
            print(batch[0].shape, batch[1].shape)
            continue
    epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader))
    if summary_writers is not None:
        summary_writers.write_task('val', epoch_loss_dic, n_epoch)
    return epoch_loss_dic
def epoch_report(start_time, end_time, train_loss, valid_loss, n_epoch):
    """Print elapsed wall-clock time plus train/validation losses for one finished epoch."""
    mins, secs = epoch_time(start_time, end_time)
    print(f'Epoch: {n_epoch + 1:02} | Time: {mins}m {secs}s', flush=True)
    print(f'\tTrain Loss: {train_loss:.3f}', flush=True)
    print(f'\t Valid. Loss: {valid_loss:.3f}', flush=True)
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES']= '0, 1'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
MODEL_NAME = 'Prior-Model-VQ-Q&A-T large'
DEBUG = 0
if DEBUG:
save_root = 'AccoMontage3/prior_model_VQ-Q&A-T/save'
log_path_name = 'debug'
else:
save_root = '/data1/zhaojw/AccoMontage3/'
log_path_name = MODEL_NAME
readme_fn = 'AccoMontage3/prior_model_VQ-Q&A-T/train_DDP.py'
|
def ddp_setup(rank, world_size):
    """
    Initialize the torch.distributed process group (NCCL backend) for single-node DDP.

    Args:
        rank: Unique identifier of each process
        world_size: Total number of processes
    """
    # Rendezvous endpoint: all workers run on this machine, fixed port.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    init_process_group(backend="nccl", rank=rank, world_size=world_size)
def main(rank, world_size, log_path_mng, VERBOSE, MODEL_NAME):
    """Per-process DDP entry point: build data/model/optimizer and run the train/val loop.

    `rank` doubles as the CUDA device index. Only rank 0 creates summary writers
    and saves checkpoints; other ranks train silently with writers set to None.
    """
    #print('rank:', rank)
    ddp_setup(rank, world_size)
    # Hyper-parameters; VERBOSE switches to a short debug configuration.
    PRETRAIN_PATH = "/data1/zhaojw/AccoMontage3/2023-12-07_134449_VQ-Q&A-T/models/VQ-Q&A-T_009_epoch.pt"
    BATCH_SIZE = 8
    N_EPOCH = 10
    CLIP = 1
    LR = 1e-4
    WARMUP_STEP = 1000
    if VERBOSE:
        N_EPOCH=5
        LR = 1e-3
        WARMUP_STEP=10
    model = Prior.init_model(pretrain_model_path=PRETRAIN_PATH, DEVICE=rank)
    model = DDP(model, device_ids=[rank], find_unused_parameters=False)
    lmd_dir = "/data1/zhaojw/LMD/VQ-Q&A-T-009-reorder/"
    # DistributedSampler shards the data per rank; the collate_fn moves batches to this rank's device.
    train_set = VQ_LMD_Dataset(lmd_dir, debug_mode=VERBOSE, split='train', mode='train')
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=False, collate_fn=lambda b: collate_fn(b, rank), sampler=DistributedSampler(train_set))
    val_set = VQ_LMD_Dataset(lmd_dir, debug_mode=VERBOSE, split='validation', mode='train')
    val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False, collate_fn=lambda b: collate_fn(b, rank), sampler=DistributedSampler(val_set))
    print(f'Dataset loaded. {len(train_loader)} samples for train and {len(val_loader)} samples for validation.')
    #optimizer = optim.Adam(model.parameters(), lr=LR)
    optimizer = optim.AdamW(model.parameters(), lr=LR, betas=[0.9, 0.999], weight_decay=1e-2)
    # Linear warmup for WARMUP_STEP steps, then cosine decay over the remaining steps.
    warmup_scheduler = LinearLR(optimizer, start_factor=1e-14, end_factor=1, total_iters=WARMUP_STEP)
    scheduler = CosineAnnealingLR(optimizer, T_max=len(train_loader)*N_EPOCH-WARMUP_STEP, eta_min=1e-6)
    #scheduler = MinExponentialLR(optimizer, gamma=0.99998, minimum=1e-5)
    #optimizer_scheduler = OptimizerScheduler(optimizer, scheduler, CLIP)
    optimizer_scheduler = OptimizerSchedulerWithWarmUp(optimizer, warmup_scheduler, scheduler, CLIP, WARMUP_STEP)
    writer_names = ['func_l']
    scheduler_writer_names = ['lr']
    if rank == 0:
        tags = {'loss': None}
        loss_writers = SummaryWriters(writer_names, tags, log_path_mng.writer_path)
        tags = {'scheduler': None}
        scheduler_writers = SummaryWriters(scheduler_writer_names, tags, log_path_mng.writer_path)
    else:
        # Non-zero ranks do not log or print batch reports.
        loss_writers = None
        scheduler_writers = None
        VERBOSE = False
    for n_epoch in range(N_EPOCH):
        start_time = time.time()
        # Re-seed the sampler per epoch so each rank sees a fresh shard ordering.
        train_loader.sampler.set_epoch(n_epoch)
        print(f'Training epoch {n_epoch}')
        train_loss = train(model, train_loader, optimizer_scheduler, writer_names, loss_writers, scheduler_writers, n_epoch=n_epoch, VERBOSE=VERBOSE)['func_l']
        print(f'Validating epoch {n_epoch}')
        val_loss = val(model, val_loader, writer_names, loss_writers, n_epoch=n_epoch, VERBOSE=VERBOSE)['func_l']
        end_time = time.time()
        if rank == 0:
            # Save the unwrapped module so the checkpoint loads without DDP.
            torch.save(model.module.state_dict(), log_path_mng.epoch_model_path(f'{MODEL_NAME}_{str(n_epoch).zfill(3)}'))
        epoch_report(start_time, end_time, train_loss, val_loss, n_epoch)
    destroy_process_group()
def accumulate_loss_dic(writer_names, loss_dic, loss_items):
    """Add each batch loss (as a Python float) onto its running epoch total, in place."""
    assert len(writer_names) == len(loss_items)
    for name, item in zip(writer_names, loss_items):
        loss_dic[name] += item.item()
    return loss_dic
def write_loss_to_dic(writer_names, loss_items):
    """Map each writer name to the scalar (.item()) value of the matching loss tensor."""
    loss_dic = {}
    assert len(writer_names) == len(loss_items)
    for key, val in zip(writer_names, loss_items):
        loss_dic[key] = val.item()
    return loss_dic
def init_loss_dic(writer_names):
    """Return a fresh accumulator dict with every tracked loss zeroed."""
    loss_dic = {}
    for key in writer_names:
        loss_dic[key] = 0.
    return loss_dic
def average_epoch_loss(epoch_loss_dict, num_batch):
    """Divide every accumulated loss by the batch count; mutates and returns the same dict."""
    for key in epoch_loss_dict:
        epoch_loss_dict[key] /= num_batch
    return epoch_loss_dict
def batch_report(loss, n_epoch, idx, num_batch, mode='training', verbose=False):
    """Print a one-batch progress line for `mode`; does nothing unless `verbose`."""
    if verbose:
        print(f'------------{mode}------------')
        print('Epoch: [{0}][{1}/{2}]'.format(n_epoch, idx, num_batch))
        print(f"\t time func loss: {loss['func_l']:.3f}")
def scheduler_show(optimizer_scheduler, verbose=False):
    """Return (and optionally print) the wrapped optimizer's current learning rate."""
    schedule_params = {}
    schedule_params['lr'] = optimizer_scheduler.optimizer.param_groups[0]['lr']
    if verbose:
        print(schedule_params)
    return schedule_params
def train(model, dataloader, optimizer_scheduler, writer_names, loss_writers, scheduler_writers, n_epoch, VERBOSE):
    """Run one training epoch; return the batch-averaged loss dict keyed by writer_names."""
    model.train()
    epoch_loss_dic = init_loss_dic(writer_names)
    for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
        try:
            optimizer_scheduler.optimizer_zero_grad()
            # Model computes its own loss when called with the 'loss' mode token.
            loss = model('loss', *batch)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), optimizer_scheduler.clip)
            optimizer_scheduler.step()
            epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss])
            batch_loss_dic = write_loss_to_dic(writer_names, [loss])
            # Global step across epochs, used as the tensorboard x-axis.
            train_step = n_epoch * len(dataloader) + idx
            # Writers are None on non-zero DDP ranks; only rank 0 logs (see main()).
            if loss_writers is not None:
                loss_writers.write_task('train', batch_loss_dic, train_step)
            batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='train', verbose=VERBOSE)
            scheduler_dic = scheduler_show(optimizer_scheduler, verbose=VERBOSE)
            if scheduler_writers is not None:
                scheduler_writers.write_task('train', scheduler_dic, train_step)
        except Exception as exc:
            # Best-effort training: report the failing batch's shapes and keep going.
            print(exc)
            print(batch[0].shape, batch[1].shape)
            continue
    scheduler_show(optimizer_scheduler, verbose=True)
    epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader))
    return epoch_loss_dic
def val(model, dataloader, writer_names, summary_writers, n_epoch, VERBOSE):
    """Run one validation epoch (no grad); return the batch-averaged loss dict."""
    model.eval()
    epoch_loss_dic = init_loss_dic(writer_names)
    for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
        try:
            with torch.no_grad():
                loss = model('loss', *batch)#, **input_params)
            epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss])
            batch_loss_dic = write_loss_to_dic(writer_names, [loss])
            # summary_writers is None on non-zero DDP ranks; only rank 0 reports.
            if summary_writers is not None:
                batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='validation', verbose=VERBOSE)
        except Exception as exc:
            # Best-effort validation: report the failing batch's shapes and keep going.
            print(exc)
            print(batch[0].shape, batch[1].shape)
            continue
    epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader))
    if summary_writers is not None:
        summary_writers.write_task('val', epoch_loss_dic, n_epoch)
    return epoch_loss_dic
def epoch_report(start_time, end_time, train_loss, valid_loss, n_epoch):
    """Print elapsed wall-clock time plus train/validation losses for one finished epoch."""
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    print(f'Epoch: {n_epoch + 1:02} | '
          f'Time: {epoch_mins}m {epoch_secs}s',
          flush=True)
    print(f'\tTrain Loss: {train_loss:.3f}', flush=True)
    print(f'\t Valid. Loss: {valid_loss:.3f}', flush=True)
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES']= '0, 1'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
MODEL_NAME = 'Prior-Model-VQ-Q&A-T large'
DEBUG = 0
if DEBUG:
save_root = 'AccoMontage3/prior_model_VQ-Q&A-T/save'
log_path_name = 'debug'
else:
save_root = '/data1/zhaojw/AccoMontage3/'
log_path_name = MODEL_NAME
readme_fn = 'AccoMontage3/prior_model_VQ-Q&A-T/train_DDP.py' | log_path_mng = LogPathManager(readme_fn, save_root=save_root, log_path_name=log_path_name) | 5 | 2023-10-23 12:36:57+00:00 | 12k |
bytedance/ColTrack | motlib/mot_dataset/transform/mot_video/mosaic.py | [
{
"identifier": "adjust_box_anns",
"path": "motlib/mot_dataset/transform/yolox/utils.py",
"snippet": "def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):\n #bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)\n #bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + ... | import cv2
import numpy as np
import random
from motlib.mot_dataset.transform.yolox.utils import adjust_box_anns
from copy import deepcopy
from motlib.mot_dataset.transform.yolox.data_augment import box_candidates, augment_hsv
from motlib.mot_dataset.transform.yolox.dataset import Dataset
from motlib.mot_dataset.transform.yolox.mosaic import MosaicDetection, get_mosaic_coordinate
from collections import defaultdict
from .data_augment import random_perspective, RandomErasing | 7,788 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
# This file may have been modified by Bytedance Ltd. and/or its affiliates (“Bytedance's Modifications”). All Bytedance's Modifications are Copyright (year) Bytedance Ltd. and/or its affiliates.
class MOTMosaicDetection(MosaicDetection):
"""Detection dataset wrapper that performs mixup for normal dataset."""
    def __init__(
        self, dataset, img_size, mosaic=True,
        degrees=10.0, translate=0.1, scale=(0.5, 1.5), mscale=(0.5, 1.5),
        shear=2.0, perspective=0.0, enable_mixup=True, args=None, train_or_test="train", transforms=None
    ):
        """Forward all augmentation settings to MosaicDetection and add random erasing.

        NOTE(review): `args` must provide `p_era` and `area_keep`, so despite the
        default it cannot actually be None here — confirm with callers.
        """
        super().__init__(dataset, img_size, mosaic, degrees, translate, scale, mscale, shear, perspective, enable_mixup, args, train_or_test, transforms)
        # value=114 is presumably the same gray fill used for image padding — TODO confirm
        self.erasing_func = RandomErasing(p=args.p_era, area_keep=args.area_keep, value=114)
| #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
# This file may have been modified by Bytedance Ltd. and/or its affiliates (“Bytedance's Modifications”). All Bytedance's Modifications are Copyright (year) Bytedance Ltd. and/or its affiliates.
class MOTMosaicDetection(MosaicDetection):
"""Detection dataset wrapper that performs mixup for normal dataset."""
    def __init__(
        self, dataset, img_size, mosaic=True,
        degrees=10.0, translate=0.1, scale=(0.5, 1.5), mscale=(0.5, 1.5),
        shear=2.0, perspective=0.0, enable_mixup=True, args=None, train_or_test="train", transforms=None
    ):
        """Forward all augmentation settings to MosaicDetection and add random erasing.

        NOTE(review): `args` must provide `p_era` and `area_keep`, so despite the
        default it cannot actually be None here — confirm with callers.
        """
        super().__init__(dataset, img_size, mosaic, degrees, translate, scale, mscale, shear, perspective, enable_mixup, args, train_or_test, transforms)
        # value=114 is presumably the same gray fill used for image padding — TODO confirm
        self.erasing_func = RandomErasing(p=args.p_era, area_keep=args.area_keep, value=114)
| @Dataset.resize_getitem | 3 | 2023-10-16 02:18:33+00:00 | 12k |
CuriseJia/FreeStyleRet | test.py | [
{
"identifier": "ShallowStyleRetrieval",
"path": "src/models/style_retrieval.py",
"snippet": "class ShallowStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(ShallowStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train... | import argparse
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from src.models import ShallowStyleRetrieval, DeepStyleRetrieval, BLIP_Retrieval
from src.dataset.data import T2ITestDataset, I2ITestDataset, X2ITestDataset
from src.utils.utils import setup_seed, getR1Accuary, getR5Accuary | 7,602 |
def parse_args():
    """Build and parse the FreeStyleRet command-line arguments."""
    p = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
    # --- project settings ---
    p.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
    p.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
    p.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
    p.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
    p.add_argument('--device', default='cuda:0')
    p.add_argument('--seed', default=42, type=int)
    p.add_argument('--num_workers', default=6, type=int)
    # --- data settings ---
    p.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
    p.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
    p.add_argument("--test_dataset_path", type=str, default='DSR/')
    p.add_argument("--test_json_path", type=str, default='DSR/test.json')
    p.add_argument("--batch_size", type=int, default=24)
    # --- model settings ---
    p.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
    p.add_argument('--gram_prompts', type=int, default=4)
    p.add_argument('--gram_prompt_dim', type=int, default=1024)
    p.add_argument('--style_prompts', type=int, default=4)
    p.add_argument('--style_prompt_dim', type=int, default=1024)
    return p.parse_args()
@torch.no_grad()  # pure inference: disables autograd, cutting memory/compute
def eval(args, model, dataloader):
    """Evaluate retrieval accuracy and print R@1 / R@5.

    Query modes, selected by ``args.type``:
      * 'text2image'  - text query against gallery images.
      * 'style2image' - styled (sketch/art/mosaic) query against original images.
      * anything else - combined text + styled-image query; the element-wise
        maximum of the two softmaxed similarity matrices is scored.

    Note: the function name shadows the builtin ``eval``; kept unchanged for
    caller compatibility.
    """
    model.eval()
    r1 = []
    r5 = []
    if args.type == 'text2image':
        for data in enumerate(tqdm(dataloader)):
            # BLIP tokenizes internally; the CLIP-style models need explicit tokenization.
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            image = data[1][1].to(args.device, non_blocking=True)
            image_feature = model(image, dtype='image')
            text_feature = model(caption, dtype='text')
            image_feature = F.normalize(image_feature, dim=-1)
            text_feature = F.normalize(text_feature, dim=-1)
            # 100.0 acts as the softmax temperature on cosine similarities.
            prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    elif args.type == 'style2image':
        for data in enumerate(tqdm(dataloader)):
            origin_image = data[1][0].to(args.device, non_blocking=True)
            retrival_image = data[1][1].to(args.device, non_blocking=True)
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            original_feature = F.normalize(original_feature, dim=-1)
            retrival_feature = F.normalize(retrival_feature, dim=-1)
            prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    else:
        for data in enumerate(tqdm(dataloader)):
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            origin_image = data[1][1].to(args.device, non_blocking=True)
            retrival_image = data[1][2].to(args.device, non_blocking=True)
            text_feature = model(caption, dtype='text')
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            text_feature = F.normalize(text_feature, dim=-1)
            original_feature = F.normalize(original_feature, dim=-1)
            retrival_feature = F.normalize(retrival_feature, dim=-1)
            prob1 = torch.softmax((100.0 * text_feature @ original_feature.T), dim=-1)
            # Fixed: was `prob2 = prob = ...`, a redundant chained assignment
            # (prob was overwritten on the next line anyway).
            prob2 = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
            prob = prob1.max(prob2)  # element-wise max fuses the two query scores
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    resr1 = sum(r1)/len(r1)
    resr5 = sum(r5)/len(r5)
    print('R@1 Acc is {}'.format(resr1))
    print('R@5 Acc is {}'.format(resr5))
if __name__ == "__main__":
args = parse_args()
setup_seed(args.seed)
if args.prompt == 'ShallowPrompt':
model = ShallowStyleRetrieval(args)
elif args.prompt == 'DeepPrompt':
|
def parse_args():
    """Build and parse the FreeStyleRet command-line arguments."""
    parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
    # project settings
    parser.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
    parser.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
    parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
    parser.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
    parser.add_argument('--device', default='cuda:0')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--num_workers', default=6, type=int)
    # data settings
    parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
    parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
    parser.add_argument("--test_dataset_path", type=str, default='DSR/')
    parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
    parser.add_argument("--batch_size", type=int, default=24)
    # model settings
    parser.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
    parser.add_argument('--gram_prompts', type=int, default=4)
    parser.add_argument('--gram_prompt_dim', type=int, default=1024)
    parser.add_argument('--style_prompts', type=int, default=4)
    parser.add_argument('--style_prompt_dim', type=int, default=1024)
    args = parser.parse_args()
    return args
def eval(args, model, dataloader):
    """Evaluate retrieval accuracy and print R@1 / R@5.

    Query modes, selected by ``args.type``: 'text2image' (text query vs gallery
    images), 'style2image' (styled query vs original images), otherwise a
    combined text + styled-image query scored by the element-wise max of the
    two softmaxed similarity matrices.
    """
    model.eval()
    r1 = []
    r5 = []
    if args.type == 'text2image':
        for data in enumerate(tqdm(dataloader)):
            # BLIP tokenizes internally; the CLIP-style models need explicit tokenization.
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            image = data[1][1].to(args.device, non_blocking=True)
            image_feature = model(image, dtype='image')
            text_feature = model(caption, dtype='text')
            image_feature = F.normalize(image_feature, dim=-1)
            text_feature = F.normalize(text_feature, dim=-1)
            # 100.0 acts as the softmax temperature on cosine similarities.
            prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    elif args.type == 'style2image':
        for data in enumerate(tqdm(dataloader)):
            origin_image = data[1][0].to(args.device, non_blocking=True)
            retrival_image = data[1][1].to(args.device, non_blocking=True)
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            original_feature = F.normalize(original_feature, dim=-1)
            retrival_feature = F.normalize(retrival_feature, dim=-1)
            prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    else:
        for data in enumerate(tqdm(dataloader)):
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            origin_image = data[1][1].to(args.device, non_blocking=True)
            retrival_image = data[1][2].to(args.device, non_blocking=True)
            text_feature = model(caption, dtype='text')
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            text_feature = F.normalize(text_feature, dim=-1)
            original_feature = F.normalize(original_feature, dim=-1)
            retrival_feature = F.normalize(retrival_feature, dim=-1)
            prob1 = torch.softmax((100.0 * text_feature @ original_feature.T), dim=-1)
            # NOTE(review): chained assignment is redundant — `prob` is overwritten below.
            prob2 = prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
            prob = prob1.max(prob2)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    resr1 = sum(r1)/len(r1)
    resr5 = sum(r5)/len(r5)
    print('R@1 Acc is {}'.format(resr1))
    print('R@5 Acc is {}'.format(resr5))
if __name__ == "__main__":
args = parse_args()
setup_seed(args.seed)
if args.prompt == 'ShallowPrompt':
model = ShallowStyleRetrieval(args)
elif args.prompt == 'DeepPrompt': | model = DeepStyleRetrieval(args) | 1 | 2023-10-17 09:32:57+00:00 | 12k |
liuqidong07/MOELoRA-peft | src/MLoRA/peft/tuners/adalora.py | [
{
"identifier": "PeftType",
"path": "src/MLoRA/peft/utils/config.py",
"snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_P... | import importlib
import re
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import bitsandbytes as bnb
from dataclasses import dataclass, field
from typing import Optional
from transformers.pytorch_utils import Conv1D
from ..utils import (
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
PeftType,
_freeze_adapter,
_get_submodules,
transpose,
)
from .lora import (
LoraConfig,
LoraLayer,
LoraModel,
mark_only_lora_as_trainable,
) | 8,092 | state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
)
return state_dict
    def update_and_allocate(self, global_step):
        """Drive AdaLoRA's rank-budget schedule for the current training step.

        Before step `total_step - tfinal` the allocator updates importance
        scores and may produce a new rank pattern; exactly at that step the
        final pattern is forced (mask) and the importance stats are reset;
        after it the stored pattern only masks the unimportant weights.
        Always returns None.
        """
        lora_config = self.peft_config[self.trainable_adapter_name]
        # Update the importance score and allocate the budget
        if global_step < lora_config.total_step - lora_config.tfinal:
            _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
            if rank_pattern:
                lora_config.rank_pattern = rank_pattern
        # Finalize the budget allocation
        elif global_step == lora_config.total_step - lora_config.tfinal:
            _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
            # for some reason, this freezes the trainable parameters and nothing gets updates
            # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
            lora_config.rank_pattern = rank_pattern
            self.rankallocator.reset_ipt()
        # Currently using inefficient way to mask the unimportant weights using the rank pattern
        # due to problem mentioned above
        elif global_step > lora_config.total_step - lora_config.tfinal:
            self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
        # Pass the function and do forward propagation
        else:
            return None
@staticmethod
def _prepare_adalora_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
model_config["model_type"]
]
if peft_config.inference_mode:
peft_config.merge_weights = True
return peft_config
class AdaLoraLayer(LoraLayer):
    """LoRA layer variant that stores SVD-style factors per adapter:
    `lora_A`/`lora_B` hold the right/left singular-vector matrices, `lora_E`
    the singular values, and `ranknum` the (frozen) current rank."""
    def __init__(
        self,
        in_features: int,
        out_features: int,
    ):
        super().__init__(in_features, out_features)
        # One entry per registered adapter name.
        self.lora_E = nn.ParameterDict({})
        self.lora_A = nn.ParameterDict({})
        self.lora_B = nn.ParameterDict({})
        self.ranknum = nn.ParameterDict({})
    def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
        """Register a new adapter's SVD factors and bookkeeping under `adapter_name`."""
        self.r[adapter_name] = r
        self.lora_alpha[adapter_name] = lora_alpha
        if lora_dropout > 0.0:
            lora_dropout_layer = nn.Dropout(p=lora_dropout)
        else:
            # Identity stand-in when no dropout is requested.
            def lora_dropout_layer(x):
                return x
        self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
        # Actual trainable parameters
        # Right singular vectors
        self.lora_A.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, self.in_features))}))
        # Singular values
        self.lora_E.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, 1))}))
        # Left singular vectors
        self.lora_B.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(self.out_features, r))}))
        # The current rank
        self.ranknum.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(1), requires_grad=False)}))
        self.ranknum[adapter_name].data.fill_(float(r))
        self.ranknum[adapter_name].requires_grad = False
        # Scaling falls back to r itself when lora_alpha is non-positive.
        self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
        if init_lora_weights:
            self.reset_lora_parameters(adapter_name)
        self.to(self.weight.device)
    def reset_lora_parameters(self, adapter_name):
        """Zero the singular values; Gaussian-init the singular-vector factors."""
        if adapter_name in self.lora_A.keys():
            nn.init.zeros_(self.lora_E[adapter_name])
            nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
            nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
class SVDLinear(nn.Linear, AdaLoraLayer):
# SVD-based adaptation by a dense layer
def __init__(
self,
adapter_name: str,
in_features: int,
out_features: int,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
fan_in_fan_out: bool = False,
**kwargs,
):
init_lora_weights = kwargs.pop("init_lora_weights", True)
nn.Linear.__init__(self, in_features, out_features, **kwargs)
AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.fan_in_fan_out = fan_in_fan_out
if fan_in_fan_out:
self.weight.data = self.weight.data.T
nn.Linear.reset_parameters(self)
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
self.active_adapter = adapter_name
def merge(self):
if self.active_adapter not in self.lora_A.keys():
return
if self.merged:
warnings.warn("Already merged. Nothing to do.")
return
if self.r[self.active_adapter] > 0:
self.weight.data += (
|
def is_bnb_available():
return importlib.util.find_spec("bitsandbytes") is not None
if is_bnb_available():
@dataclass
class AdaLoraConfig(LoraConfig):
"""
This is the configuration class to store the configuration of a [`~peft.AdaLora`].
Args:
target_r (`int`): The target average rank of incremental matrix.
init_r (`int`): The initial rank for each incremental matrix.
tinit (`int`): The steps of initial fine-tuning warmup.
tfinal (`int`): The step of final fine-tuning.
deltaT (`int`): The time internval between two budget allocations.
beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
beta2 (`float`): The hyperparameter of EMA for undertainty quantification.
orth_reg_weight (`float`): The coefficient of orthogonal regularization.
total_step (`int`): The total training steps that should be specified before training.
rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
"""
target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
init_r: int = field(default=12, metadata={"help": "Intial Lora matrix dimension."})
tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."})
deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
def __post_init__(self):
self.peft_type = PeftType.ADALORA
class AdaLoraModel(LoraModel):
"""
Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
https://openreview.net/pdf?id=lq62uWRJjiY
Args:
model ([`transformers.PreTrainedModel`]): The model to be adapted.
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
Returns:
`torch.nn.Module`: The AdaLora model.
Example::
>>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig
>>> config = AdaLoraConfig(
peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
lora_dropout=0.01,
)
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(config, model)
**Attributes**:
- **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
"""
def __init__(self, model, config, adapter_name):
nn.Module.__init__(self)
self.model = model
self.peft_config = config
self.add_adapter(adapter_name, self.peft_config[adapter_name])
def add_adapter(self, adapter_name, config=None):
if config is not None:
model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config
config = self._prepare_adalora_config(config, model_config)
self.peft_config[adapter_name] = config
self._find_and_replace(adapter_name)
if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none":
raise ValueError(
"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters."
)
traininable_mode_counter = 0
for config in self.peft_config.values():
if not config.inference_mode:
traininable_mode_counter += 1
if traininable_mode_counter > 1:
raise ValueError(
"AdaLoraModel supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
)
mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
else:
self.trainable_adapter_name = adapter_name
self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
def _find_and_replace(self, adapter_name):
lora_config = self.peft_config[adapter_name]
loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
if loaded_in_8bit and not is_bnb_available():
raise ImportError(
"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. "
"You can install it with `pip install bitsandbytes`."
)
is_target_modules_in_base_model = False
kwargs = {
"r": lora_config.init_r,
"lora_alpha": lora_config.lora_alpha,
"lora_dropout": lora_config.lora_dropout,
"fan_in_fan_out": lora_config.fan_in_fan_out,
"init_lora_weights": lora_config.init_lora_weights,
}
key_list = [key for key, _ in self.model.named_modules()]
for key in key_list:
if isinstance(lora_config.target_modules, str):
target_module_found = re.fullmatch(lora_config.target_modules, key)
else:
target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)
if target_module_found:
if not is_target_modules_in_base_model:
is_target_modules_in_base_model = True
parent, target, target_name = _get_submodules(self.model, key)
bias = target.bias is not None
if isinstance(target, LoraLayer):
target.update_layer(
adapter_name,
lora_config.init_r,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
else:
if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):
kwargs.update(
{
"has_fp16_weights": target.state.has_fp16_weights,
"memory_efficient_backward": target.state.memory_efficient_backward,
"threshold": target.state.threshold,
"index": target.index,
}
)
new_module = SVDLinear8bitLt(
adapter_name, target.in_features, target.out_features, bias=bias, **kwargs
)
else:
if isinstance(target, torch.nn.Linear):
in_features, out_features = target.in_features, target.out_features
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
elif isinstance(target, Conv1D):
in_features, out_features = (
target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape
)
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. "
"Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
)
new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)
self._replace_module(parent, target_name, new_module, target)
if not is_target_modules_in_base_model:
raise ValueError(
f"Target modules {lora_config.target_modules} not found in the base model. "
f"Please check the target modules and try again."
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
# Calculate the orthogonal regularization
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
assert orth_reg_weight > 0
if hasattr(outputs, "loss"):
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
regu_loss = regu_loss / num_param
outputs.loss += orth_reg_weight * regu_loss
return outputs
def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
lora_config = self.peft_config[adapter_name]
for name, rank_idx in rank_pattern.items():
if isinstance(rank_idx, list):
rank = sum(rank_idx)
elif isinstance(rank_idx, torch.Tensor):
rank_idx = rank_idx.view(-1)
rank = rank_idx.sum().item()
else:
raise ValueError("Unexcepted type of rank_idx")
key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
_, target, _ = _get_submodules(self.model, key)
lora_E_weights = target.lora_E[adapter_name][rank_idx]
lora_A_weights = target.lora_A[adapter_name][rank_idx]
lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
ranknum = target.ranknum[adapter_name]
target.update_layer(
adapter_name,
rank,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
with torch.no_grad():
if rank > 0:
target.lora_E[adapter_name].copy_(lora_E_weights)
target.lora_A[adapter_name].copy_(lora_A_weights)
target.lora_B[adapter_name].copy_(lora_B_weights)
# The scaling is exactly as the previous
target.ranknum[adapter_name].copy_(ranknum)
def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
for name, rank_idx in rank_pattern.items():
rank = sum(rank_idx)
prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
for layer in ["lora_E", "lora_A", "lora_B"]:
key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
if layer != "lora_B":
state_dict[key] = (
state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
)
else:
state_dict[key] = (
state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
)
return state_dict
def update_and_allocate(self, global_step):
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
# for some reason, this freezes the trainable parameters and nothing gets updates
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using inefficient way to mask the unimportant weights using the rank pattern
# due to problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# Pass the function and do forward propagation
else:
return None
@staticmethod
def _prepare_adalora_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
model_config["model_type"]
]
if peft_config.inference_mode:
peft_config.merge_weights = True
return peft_config
class AdaLoraLayer(LoraLayer):
def __init__(
self,
in_features: int,
out_features: int,
):
super().__init__(in_features, out_features)
self.lora_E = nn.ParameterDict({})
self.lora_A = nn.ParameterDict({})
self.lora_B = nn.ParameterDict({})
self.ranknum = nn.ParameterDict({})
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
def lora_dropout_layer(x):
return x
self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
# Actual trainable parameters
# Right singular vectors
self.lora_A.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, self.in_features))}))
# Singular values
self.lora_E.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, 1))}))
# Left singular vectors
self.lora_B.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(self.out_features, r))}))
# The current rank
self.ranknum.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(1), requires_grad=False)}))
self.ranknum[adapter_name].data.fill_(float(r))
self.ranknum[adapter_name].requires_grad = False
self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
if init_lora_weights:
self.reset_lora_parameters(adapter_name)
self.to(self.weight.device)
def reset_lora_parameters(self, adapter_name):
if adapter_name in self.lora_A.keys():
nn.init.zeros_(self.lora_E[adapter_name])
nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
class SVDLinear(nn.Linear, AdaLoraLayer):
# SVD-based adaptation by a dense layer
def __init__(
self,
adapter_name: str,
in_features: int,
out_features: int,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
fan_in_fan_out: bool = False,
**kwargs,
):
init_lora_weights = kwargs.pop("init_lora_weights", True)
nn.Linear.__init__(self, in_features, out_features, **kwargs)
AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.fan_in_fan_out = fan_in_fan_out
if fan_in_fan_out:
self.weight.data = self.weight.data.T
nn.Linear.reset_parameters(self)
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
self.active_adapter = adapter_name
def merge(self):
if self.active_adapter not in self.lora_A.keys():
return
if self.merged:
warnings.warn("Already merged. Nothing to do.")
return
if self.r[self.active_adapter] > 0:
self.weight.data += ( | transpose( | 2 | 2023-10-19 10:55:50+00:00 | 12k |
YuroFR/freqtrade-modded-crypto-trading-bot | freqtrade/plugins/pairlist/VolatilityFilter.py | [
{
"identifier": "Config",
"path": "freqtrade/constants.py",
"snippet": "DOCS_LINK = \"https://www.freqtrade.io/en/stable\"\nDEFAULT_CONFIG = 'config.json'\nPROCESS_THROTTLE_SECS = 5 # sec\nHYPEROPT_EPOCH = 100 # epochs\nRETRY_TIMEOUT = 30 # sec\nTIMEOUT_UNITS = ['minutes', 'seconds']\nEXPORT_OPTIONS ... | import logging
import sys
import numpy as np
from copy import deepcopy
from datetime import timedelta
from typing import Any, Dict, List, Optional
from cachetools import TTLCache
from pandas import DataFrame
from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_floor_day, dt_now, dt_ts | 8,785 | """
Volatility pairlist filter
"""
logger = logging.getLogger(__name__)
class VolatilityFilter(IPairList):
"""
Filters pairs by volatility
"""
def __init__(self, exchange, pairlistmanager,
config: Config, pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
self._days = pairlistconfig.get('lookback_days', 10)
self._min_volatility = pairlistconfig.get('min_volatility', 0)
self._max_volatility = pairlistconfig.get('max_volatility', sys.maxsize)
self._refresh_period = pairlistconfig.get('refresh_period', 1440)
self._def_candletype = self._config['candle_type_def']
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def'])
if self._days < 1:
raise OperationalException("VolatilityFilter requires lookback_days to be >= 1")
if self._days > candle_limit:
raise OperationalException("VolatilityFilter requires lookback_days to not "
f"exceed exchange max request size ({candle_limit})")
@property
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty List is passed
as tickers argument to filter_pairlist
"""
return False
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
"""
return (f"{self.name} - Filtering pairs with volatility range "
f"{self._min_volatility}-{self._max_volatility} "
f" the last {self._days} {plural(self._days, 'day')}.")
@staticmethod
def description() -> str:
return "Filter pairs by their recent volatility."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"lookback_days": {
"type": "number",
"default": 10,
"description": "Lookback Days",
"help": "Number of days to look back at.",
},
"min_volatility": {
"type": "number",
"default": 0,
"description": "Minimum Volatility",
"help": "Minimum volatility a pair must have to be considered.",
},
"max_volatility": {
"type": "number",
"default": None,
"description": "Maximum Volatility",
"help": "Maximum volatility a pair must have to be considered.",
},
**IPairList.refresh_period_parameter()
}
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Validate trading range
:param pairlist: pairlist to filter or sort
:param tickers: Tickers (from exchange.get_tickers). May be cached.
:return: new allowlist
"""
| """
Volatility pairlist filter
"""
logger = logging.getLogger(__name__)
class VolatilityFilter(IPairList):
"""
Filters pairs by volatility
"""
def __init__(self, exchange, pairlistmanager,
config: Config, pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
self._days = pairlistconfig.get('lookback_days', 10)
self._min_volatility = pairlistconfig.get('min_volatility', 0)
self._max_volatility = pairlistconfig.get('max_volatility', sys.maxsize)
self._refresh_period = pairlistconfig.get('refresh_period', 1440)
self._def_candletype = self._config['candle_type_def']
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def'])
if self._days < 1:
raise OperationalException("VolatilityFilter requires lookback_days to be >= 1")
if self._days > candle_limit:
raise OperationalException("VolatilityFilter requires lookback_days to not "
f"exceed exchange max request size ({candle_limit})")
@property
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty List is passed
as tickers argument to filter_pairlist
"""
return False
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
"""
return (f"{self.name} - Filtering pairs with volatility range "
f"{self._min_volatility}-{self._max_volatility} "
f" the last {self._days} {plural(self._days, 'day')}.")
@staticmethod
def description() -> str:
return "Filter pairs by their recent volatility."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"lookback_days": {
"type": "number",
"default": 10,
"description": "Lookback Days",
"help": "Number of days to look back at.",
},
"min_volatility": {
"type": "number",
"default": 0,
"description": "Minimum Volatility",
"help": "Minimum volatility a pair must have to be considered.",
},
"max_volatility": {
"type": "number",
"default": None,
"description": "Maximum Volatility",
"help": "Maximum volatility a pair must have to be considered.",
},
**IPairList.refresh_period_parameter()
}
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Validate trading range
:param pairlist: pairlist to filter or sort
:param tickers: Tickers (from exchange.get_tickers). May be cached.
:return: new allowlist
""" | needed_pairs: ListPairsWithTimeframes = [ | 0 | 2023-10-21 10:02:05+00:00 | 12k |
yanzhh/HGERE | transformers/src/transformers/modeling_utils.py | [
{
"identifier": "get_activation",
"path": "transformers/src/transformers/activations.py",
"snippet": "def get_activation(activation_string):\n if activation_string in ACT2FN:\n return ACT2FN[activation_string]\n else:\n raise KeyError(\n \"function {} not found in ACT2FN m... | import logging
import os
import typing
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
from torch.nn import Identity
from transformers import load_tf2_checkpoint_in_pytorch_model | 8,358 | save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
| # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
logger = logging.getLogger(__name__)
try:
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
    """Dummy inputs suitable for a forward pass through the network.

    Returns:
        dict mapping ``"input_ids"`` to a ``torch.Tensor`` built from the
        module-level ``DUMMY_INPUTS`` constant.
    """
    ids = torch.tensor(DUMMY_INPUTS)
    return {"input_ids": ids}
def __init__(self, config, *inputs, **kwargs):
    """Store the model configuration, rejecting anything that is not a
    ``PretrainedConfig`` instance.
    """
    super().__init__()
    if isinstance(config, PretrainedConfig):
        # Save config in model
        self.config = config
    else:
        cls_name = self.__class__.__name__
        raise ValueError(
            "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
            "To create a model from a pretrained model use "
            "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(cls_name, cls_name)
        )
@property
def base_model(self):
    # The bare base transformer module; falls back to `self` when this model
    # has no sub-module registered under `base_model_prefix`.
    return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
    """Return the module that maps vocabulary ids to hidden states.

    Delegates to the base model; a model that *is* its own base model must
    override this method.

    Returns:
        nn.Module: the input embedding module.
    """
    inner = getattr(self, self.base_model_prefix, self)
    if inner is self:
        raise NotImplementedError
    return inner.get_input_embeddings()
def set_input_embeddings(self, value):
    """Replace the module that maps vocabulary ids to hidden states.

    Delegates to the base model; a model that *is* its own base model must
    override this method.

    Args:
        value: an ``nn.Module`` mapping vocabulary to hidden states.
    """
    inner = getattr(self, self.base_model_prefix, self)
    if inner is self:
        raise NotImplementedError
    inner.set_input_embeddings(value)
def get_output_embeddings(self):
    """
    Returns the model's output embeddings (hidden states -> vocabulary).

    The base implementation has none; subclasses with an LM head override
    this so `tie_weights` can share it with the input embeddings.

    Returns:
        :obj:`nn.Module`:
            A torch module mapping hidden states to vocabulary, or ``None``.
    """
    return None  # Overwrite for models with output embeddings
def tie_weights(self):
    """Tie each output-embedding module to the input embeddings.

    Under the `torchscript` config flag the weights are cloned instead of
    shared, since TorchScript can't handle parameter sharing.
    """
    out = self.get_output_embeddings()
    if out is None:
        return
    # Models may expose one head or a list of heads; normalize to a list.
    heads = out if isinstance(out, list) else [out]
    for head in heads:
        self._tie_or_clone_weights(head, self.get_input_embeddings())
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
    """Tie or clone module weights depending on whether we are using
    TorchScript or not.
    """
    if self.config.torchscript:
        # TorchScript can't handle shared parameters, so copy instead.
        output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
    else:
        # Share the parameter object itself: updates to one update both.
        output_embeddings.weight = input_embeddings.weight
    if hasattr(output_embeddings, "bias") and output_embeddings.bias is not None:
        # Zero-pad the bias so it matches the (possibly larger) vocab size,
        # i.e. the number of rows of the newly assigned weight.
        output_embeddings.bias.data = torch.nn.functional.pad(
            output_embeddings.bias.data,
            (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
            "constant",
            0,
        )
    if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
        # Keep the Linear head's metadata consistent with the embedding size.
        output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens=None):
    """Resize the input token embedding matrix of the model.

    Ties weights again afterwards if the model class supports it.

    Args:
        new_num_tokens: (`optional`) int — target vocabulary size. Growing
            adds newly initialized vectors at the end; shrinking removes
            vectors from the end. ``None`` leaves the embeddings untouched.

    Returns:
        ``torch.nn.Embeddings``: pointer to the model's input token
        embedding module.
    """
    base = getattr(self, self.base_model_prefix, self)  # base model if any
    embeddings = base._resize_token_embeddings(new_num_tokens)
    if new_num_tokens is not None:
        # Keep both the top-level config and the base model in sync.
        self.config.vocab_size = new_num_tokens
        base.vocab_size = new_num_tokens
        # Re-tie output embeddings to the (new) input embeddings if needed.
        self.tie_weights()
    return embeddings
def _resize_token_embeddings(self, new_num_tokens):
    # Swap the current input embeddings for a resized copy, then return the
    # module actually installed on the model.
    resized = self._get_resized_embeddings(self.get_input_embeddings(), new_num_tokens)
    self.set_input_embeddings(resized)
    return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
    """ Build a resized Embedding Module from a provided token Embedding Module.
        Increasing the size will add newly initialized vectors at the end
        Reducing the size will remove vectors from the end

    Args:
        new_num_tokens: (`optional`) int
            New number of tokens in the embedding matrix.
            Increasing the size will add newly initialized vectors at the end
            Reducing the size will remove vectors from the end
            If not provided or None: return the provided token Embedding Module.
    Return: ``torch.nn.Embeddings``
        Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
    """
    if new_num_tokens is None:
        return old_embeddings
    old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
    if new_num_tokens == old_num_tokens:
        return old_embeddings
    # Build new embeddings (on the same device as the old ones)
    new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
    new_embeddings.to(old_embeddings.weight.device)
    # initialize all new embeddings (in particular added tokens)
    self._init_weights(new_embeddings)
    # Copy word embeddings from the previous weights; only the overlapping
    # prefix of rows is preserved when shrinking.
    num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
    new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
    return new_embeddings
def init_weights(self):
    """Initialize all weights, apply any configured head pruning, and tie
    input/output embeddings where applicable.
    """
    # Module-wise weight initialization (subclass provides _init_weights).
    self.apply(self._init_weights)
    # Prune heads recorded in the config, if any.
    if self.config.pruned_heads:
        self.prune_heads(self.config.pruned_heads)
    # Tie weights last so freshly initialized heads share the embeddings.
    self.tie_weights()
def prune_heads(self, heads_to_prune):
    """Prune attention heads of the base model.

    Args:
        heads_to_prune: mapping of layer index (int) to the list of head
            indices (int) to prune in that layer, e.g. ``{1: [0, 2],
            2: [2, 3]}`` prunes heads 0 and 2 on layer 1 and heads 2 and 3
            on layer 2.
    """
    recorded = self.config.pruned_heads
    for layer, heads in heads_to_prune.items():
        # Record the union of previously pruned and newly requested heads
        # (stored as a list so the config stays JSON-serializable).
        recorded[layer] = list(set(recorded.get(layer, [])) | set(heads))
    self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
    """Save the model weights and configuration file to a directory, so they
    can be re-loaded with :func:`~transformers.PreTrainedModel.from_pretrained`.

    Args:
        save_directory: existing directory to write ``config.json`` and the
            weights file into.

    Raises:
        AssertionError: if *save_directory* is not an existing directory.
    """
    # Defect fixed: this was a bare `assert`, which is silently stripped
    # under `python -O`. Raise explicitly instead; AssertionError is kept
    # so any existing callers catching it keep working.
    if not os.path.isdir(save_directory):
        raise AssertionError(
            "Saving path should be a directory where the model and configuration can be saved"
        )
    # If wrapped for distributed training (has a `.module`), save the inner model.
    model_to_save = self.module if hasattr(self, "module") else self
    # Attach architecture to the config so from_pretrained can rebuild it.
    model_to_save.config.architectures = [model_to_save.__class__.__name__]
    # Save configuration file
    model_to_save.config.save_pretrained(save_directory)
    # If we save using the predefined names, we can load using `from_pretrained`
    output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path): | if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): | 4 | 2023-10-15 02:31:09+00:00 | 12k |
generative-skill-chaining/gsc-code | generative_skill_chaining/envs/pybullet/table/predicates.py | [
{
"identifier": "primitive_actions",
"path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py",
"snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANG... | import dataclasses
import random
import numpy as np
import pybullet as p
import symbolic
from typing import Optional, Dict, List, Sequence, Tuple, Type
from ctrlutils import eigen
from shapely.geometry import Polygon, LineString
from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils
from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack
from generative_skill_chaining.envs.pybullet.sim import math
from generative_skill_chaining.envs.pybullet.sim.robot import Robot | 9,367 |
# Debug printer: a no-op by default. PEP 8 (E731) discourages assigning a
# lambda to a name; a `def` also gives the callable a useful __name__.
def dbprint(*args: object) -> None:
    """No-op debug printer; replace the body with ``print(*args)`` to trace."""
@dataclasses.dataclass
class Predicate:
    """Symbolic predicate over named objects, e.g. ``on(box, rack)``.

    Equality and hashing are defined on the canonical string form, so two
    predicates compare equal iff they have the same type and argument names.
    """

    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        """Instantiate the Predicate subclass named by *proposition*."""
        name, args = symbolic.parse_proposition(proposition)
        # Resolve the subclass by case-insensitive lookup over module globals.
        registry = {
            cls_name.lower(): cls_obj for cls_name, cls_obj in globals().items()
        }
        return registry[name](args)

    def sample(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Generates a geometric grounding of a predicate."""
        return True

    def value(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Evaluates to True if the geometrically grounded predicate is satisfied."""
        return True

    def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]:
        """Look up this predicate's argument names in *objects*."""
        return [objects[name] for name in self.args]

    def __str__(self) -> str:
        joined = ", ".join(self.args)
        return f"{type(self).__name__.lower()}({joined})"

    def __hash__(self) -> int:
        return hash(str(self))

    def __eq__(self, other) -> bool:
        return str(self) == str(other)
class HandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the tail end on a hook object."""
class UpperHandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the head on a hook object."""
class Free(Predicate):
"""Unary predicate enforcing that no top-down occlusions exist on the object."""
DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
|
# Debug printer: a no-op by default. PEP 8 (E731) discourages assigning a
# lambda to a name; a `def` also gives the callable a useful __name__.
def dbprint(*args: object) -> None:
    """No-op debug printer; replace the body with ``print(*args)`` to trace."""
@dataclasses.dataclass
class Predicate:
    """Symbolic predicate over named objects, e.g. ``on(box, rack)``.

    Equality and hashing are defined on the canonical string form, so two
    predicates compare equal iff they have the same type and argument names.
    """

    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        """Instantiate the Predicate subclass named by *proposition*."""
        name, args = symbolic.parse_proposition(proposition)
        # Resolve the subclass by case-insensitive lookup over module globals.
        registry = {
            cls_name.lower(): cls_obj for cls_name, cls_obj in globals().items()
        }
        return registry[name](args)

    def sample(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Generates a geometric grounding of a predicate."""
        return True

    def value(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Evaluates to True if the geometrically grounded predicate is satisfied."""
        return True

    def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]:
        """Look up this predicate's argument names in *objects*."""
        return [objects[name] for name in self.args]

    def __str__(self) -> str:
        joined = ", ".join(self.args)
        return f"{type(self).__name__.lower()}({joined})"

    def __hash__(self) -> int:
        return hash(str(self))

    def __eq__(self, other) -> bool:
        return str(self) == str(other)
class HandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the tail end on a hook object."""
class UpperHandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the head on a hook object."""
class Free(Predicate):
"""Unary predicate enforcing that no top-down occlusions exist on the object."""
DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = { | (Box, Box): 0.05, | 2 | 2023-10-16 00:22:40+00:00 | 12k |
akashgreninja/GreSec | backend/venv/lib/python3.10/site-packages/h11/_connection.py | [
{
"identifier": "ConnectionClosed",
"path": "backend/venv/lib/python3.10/site-packages/h11/_events.py",
"snippet": "class ConnectionClosed(Event):\n \"\"\"This event indicates that the sender has closed their outgoing\n connection.\n\n Note that this does not necessarily mean that they can't *r... | from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
from ._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
from ._readers import READERS, ReadersType
from ._receivebuffer import ReceiveBuffer
from ._state import (
_SWITCH_CONNECT,
_SWITCH_UPGRADE,
CLIENT,
ConnectionState,
DONE,
ERROR,
MIGHT_SWITCH_PROTOCOL,
SEND_BODY,
SERVER,
SWITCHED_PROTOCOL,
)
from ._util import ( # Import the internal things we need
LocalProtocolError,
RemoteProtocolError,
Sentinel,
)
from ._writers import WRITERS, WritersType | 10,206 |
See :ref:`switching-protocols` for discussion of why you'd want this.
"""
return (bytes(self._receive_buffer), self._receive_buffer_closed)
def receive_data(self, data: bytes) -> None:
    """Add *data* to the internal receive buffer without processing it.

    Call :meth:`next_event` afterwards to actually parse the new data.

    An empty byte-string (``b""``) means the remote side closed the
    connection (EOF), matching the convention of ``socket.recv`` /
    ``file.read``. If your API treats an empty read as a valid non-EOF
    condition, filter such strings out before calling this method.
    Calling ``receive_data(b"")`` repeatedly is fine and equivalent to
    calling it once.

    Raises:
        RuntimeError: if non-empty *data* arrives after EOF was already
            signalled.
    """
    if not data:
        # EOF marker; idempotent.
        self._receive_buffer_closed = True
        return
    if self._receive_buffer_closed:
        raise RuntimeError("received close, then received more data?")
    self._receive_buffer += data
def _extract_next_receive_event(
    self,
) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
    """Run the current reader over the receive buffer and translate the
    result into an event, NEED_DATA, or PAUSED.
    """
    state = self.their_state
    # We don't pause immediately when they enter DONE, because even in
    # DONE state we can still process a ConnectionClosed() event. But
    # if we have data in our buffer, then we definitely aren't getting
    # a ConnectionClosed() immediately and we need to pause.
    if state is DONE and self._receive_buffer:
        return PAUSED
    if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
        return PAUSED
    assert self._reader is not None
    event = self._reader(self._receive_buffer)
    if event is None:
        if not self._receive_buffer and self._receive_buffer_closed:
            # In some unusual cases (basically just HTTP/1.0 bodies), EOF
            # triggers an actual protocol event; in that case, we want to
            # return that event, and then the state will change and we'll
            # get called again to generate the actual ConnectionClosed().
            if hasattr(self._reader, "read_eof"):
                event = self._reader.read_eof()  # type: ignore[attr-defined]
            else:
                event = ConnectionClosed()
        if event is None:
            event = NEED_DATA
    return event  # type: ignore[no-any-return]
def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
"""Parse the next event out of our receive buffer, update our internal
state, and return it.
This is a mutating operation -- think of it like calling :func:`next`
on an iterator.
Returns:
: One of three things:
1) An event object -- see :ref:`events`.
2) The special constant :data:`NEED_DATA`, which indicates that
you need to read more data from your socket and pass it to
:meth:`receive_data` before this method will be able to return
any more events.
3) The special constant :data:`PAUSED`, which indicates that we
are not in a state where we can process incoming data (usually
because the peer has finished their part of the current
request/response cycle, and you have not yet called
:meth:`start_next_cycle`). See :ref:`flow-control` for details.
Raises:
RemoteProtocolError:
The peer has misbehaved. You should close the connection
(possibly after sending some kind of 4xx response).
Once this method returns :class:`ConnectionClosed` once, then all
subsequent calls will also return :class:`ConnectionClosed`.
If this method raises any exception besides :exc:`RemoteProtocolError`
then that's a bug -- if it happens please file a bug report!
If this method raises any exception then it also sets
:attr:`Connection.their_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
"""
if self.their_state is ERROR:
| # This contains the main Connection class. Everything in h11 revolves around
# this.
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = ["Connection", "NEED_DATA", "PAUSED"]
class NEED_DATA(Sentinel, metaclass=Sentinel):
    # Returned by Connection.next_event() when more bytes must be fed to
    # receive_data() before another event can be parsed.
    pass
class PAUSED(Sentinel, metaclass=Sentinel):
    # Returned by Connection.next_event() when we are not in a state where
    # incoming data can be processed (e.g. awaiting start_next_cycle()).
    pass
# If we ever have this much buffered without it making a complete parseable
# event, we error out. The only time we really buffer is when reading the
# request/response line + headers together, so this is effectively the limit on
# the size of that.
#
# Some precedents for defaults:
# - node.js: 80 * 1024
# - tomcat: 8 * 1024
# - IIS: 16 * 1024
# - Apache: <8 KiB per line>
DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
# RFC 7230's rules for connection lifecycles:
# - If either side says they want to close the connection, then the connection
# must close.
# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
# (and even this is a mess -- e.g. if you're implementing a proxy then
# sending Connection: keep-alive is forbidden).
#
# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
# our rule is:
# - If someone says Connection: close, we will close
# - If someone uses HTTP/1.0, we will close.
def _keep_alive(event: Union[Request, Response]) -> bool:
    """Return True if *event* permits reusing the connection.

    We only keep the connection alive when the peer speaks HTTP/1.1 or
    later and did not send ``Connection: close``.
    """
    tokens = get_comma_header(event.headers, b"connection")
    if b"close" in tokens:
        return False
    # A missing http_version attribute is treated as 1.1; anything older
    # than 1.1 defaults to closing the connection.
    return getattr(event, "http_version", b"1.1") >= b"1.1"
def _body_framing(
    request_method: bytes, event: Union[Request, Response]
) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:
    """Decide how the body attached to *event* is framed on the wire."""
    # Called when we enter SEND_BODY to figure out framing information for
    # this body.
    #
    # These are the only two events that can trigger a SEND_BODY state:
    assert type(event) in (Request, Response)
    # Returns one of:
    #
    #    ("content-length", count)
    #    ("chunked", ())
    #    ("http/1.0", ())
    #
    # which are (lookup key, *args) for constructing body reader/writer
    # objects.
    #
    # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
    #
    # Step 1: some responses always have an empty body, regardless of what the
    # headers say.
    if type(event) is Response:
        if (
            event.status_code in (204, 304)
            or request_method == b"HEAD"
            or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
        ):
            return ("content-length", (0,))
        # Section 3.3.3 also lists another case -- responses with status_code
        # < 200. For us these are InformationalResponses, not Responses, so
        # they can't get into this function in the first place.
        assert event.status_code >= 200
    # Step 2: check for Transfer-Encoding (T-E beats C-L):
    transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
    if transfer_encodings:
        assert transfer_encodings == [b"chunked"]
        return ("chunked", ())
    # Step 3: check for Content-Length
    content_lengths = get_comma_header(event.headers, b"content-length")
    if content_lengths:
        return ("content-length", (int(content_lengths[0]),))
    # Step 4: no applicable headers; fallback/default depends on type
    if type(event) is Request:
        return ("content-length", (0,))
    else:
        return ("http/1.0", ())
################################################################
#
# The main Connection class
#
################################################################
class Connection:
"""An object encapsulating the state of an HTTP connection.
Args:
our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
you're implementing a server, pass :data:`h11.SERVER`.
max_incomplete_event_size (int):
The maximum number of bytes we're willing to buffer of an
incomplete event. In practice this mostly sets a limit on the
maximum size of the request/response line + headers. If this is
exceeded, then :meth:`next_event` will raise
:exc:`RemoteProtocolError`.
"""
def __init__(
    self,
    our_role: Type[Sentinel],
    max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
) -> None:
    """Set up role tracking, the connection state machine, reader/writer
    callables, and the receive buffer.
    """
    self._max_incomplete_event_size = max_incomplete_event_size
    # State and role tracking
    if our_role not in (CLIENT, SERVER):
        raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role))
    self.our_role = our_role
    self.their_role: Type[Sentinel]
    if our_role is CLIENT:
        self.their_role = SERVER
    else:
        self.their_role = CLIENT
    self._cstate = ConnectionState()
    # Callables for converting data->events or vice-versa given the
    # current state
    self._writer = self._get_io_object(self.our_role, None, WRITERS)
    self._reader = self._get_io_object(self.their_role, None, READERS)
    # Holds any unprocessed received data
    self._receive_buffer = ReceiveBuffer()
    # If this is true, then it indicates that the incoming connection was
    # closed *after* the end of whatever's in self._receive_buffer:
    self._receive_buffer_closed = False
    # Extra bits of state that don't fit into the state machine.
    #
    # These two are only used to interpret framing headers for figuring
    # out how to read/write response bodies. their_http_version is also
    # made available as a convenient public API.
    self.their_http_version: Optional[bytes] = None
    self._request_method: Optional[bytes] = None
    # This is pure flow-control and doesn't at all affect the set of legal
    # transitions, so no need to bother ConnectionState with it:
    self.client_is_waiting_for_100_continue = False
@property
def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
    """Snapshot of both roles' states: ``{CLIENT: ..., SERVER: ...}``.

    See :ref:`state-machine` for details.
    """
    # Return a copy so callers can't mutate the internal state machine.
    return {role: state for role, state in self._cstate.states.items()}
@property
def our_state(self) -> Type[Sentinel]:
    """The current state of the role we are playing (see :ref:`state-machine`)."""
    current = self._cstate.states
    return current[self.our_role]
@property
def their_state(self) -> Type[Sentinel]:
    """The current state of the role we are NOT playing (see :ref:`state-machine`)."""
    current = self._cstate.states
    return current[self.their_role]
@property
def they_are_waiting_for_100_continue(self) -> bool:
    """True when the peer is a client blocked waiting on our ``100 Continue``."""
    if self.their_role is not CLIENT:
        return False
    return self.client_is_waiting_for_100_continue
def start_next_cycle(self) -> None:
    """Reset our connection state for a new request/response cycle.

    If both client and server are in :data:`DONE` state, resets them both
    to :data:`IDLE` for a new cycle on this same connection; otherwise
    raises :exc:`LocalProtocolError`.

    See :ref:`keepalive-and-pipelining`.
    """
    previous = dict(self._cstate.states)
    self._cstate.start_next_cycle()
    # their_http_version is deliberately left alone: it describes the peer,
    # and presumably outlasts a single request/response cycle.
    self._request_method = None
    assert not self.client_is_waiting_for_100_continue
    self._respond_to_state_changes(previous)
def _process_error(self, role: Type[Sentinel]) -> None:
    # Push *role* into the ERROR state, then refresh our reader/writer.
    before = dict(self._cstate.states)
    self._cstate.process_error(role)
    self._respond_to_state_changes(before)
def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:
    """Classify a server event as accepting a protocol switch, if any.

    Returns ``_SWITCH_UPGRADE`` for a 101 informational response,
    ``_SWITCH_CONNECT`` for a 2xx response to a pending CONNECT proposal,
    and ``None`` otherwise.
    """
    if type(event) is InformationalResponse and event.status_code == 101:
        return _SWITCH_UPGRADE
    if (
        type(event) is Response
        and _SWITCH_CONNECT in self._cstate.pending_switch_proposals
        and 200 <= event.status_code < 300
    ):
        return _SWITCH_CONNECT
    return None
# All events go through here
def _process_event(self, role: Type[Sentinel], event: Event) -> None:
    """Feed *event* through the state machine and update derived state
    (framing info, keep-alive, and 100-continue bookkeeping).
    """
    # First, pass the event through the state machine to make sure it
    # succeeds.
    old_states = dict(self._cstate.states)
    if role is CLIENT and type(event) is Request:
        if event.method == b"CONNECT":
            self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
        if get_comma_header(event.headers, b"upgrade"):
            self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
    server_switch_event = None
    if role is SERVER:
        server_switch_event = self._server_switch_event(event)
    self._cstate.process_event(role, type(event), server_switch_event)
    # Then perform the updates triggered by it.
    if type(event) is Request:
        # Remembered for body framing (e.g. HEAD/CONNECT responses).
        self._request_method = event.method
    if role is self.their_role and type(event) in (
        Request,
        Response,
        InformationalResponse,
    ):
        event = cast(Union[Request, Response, InformationalResponse], event)
        self.their_http_version = event.http_version
    # Keep alive handling
    #
    # RFC 7230 doesn't really say what one should do if Connection: close
    # shows up on a 1xx InformationalResponse. I think the idea is that
    # this is not supposed to happen. In any case, if it does happen, we
    # ignore it.
    if type(event) in (Request, Response) and not _keep_alive(
        cast(Union[Request, Response], event)
    ):
        self._cstate.process_keep_alive_disabled()
    # 100-continue
    if type(event) is Request and has_expect_100_continue(event):
        self.client_is_waiting_for_100_continue = True
    if type(event) in (InformationalResponse, Response):
        self.client_is_waiting_for_100_continue = False
    if role is CLIENT and type(event) in (Data, EndOfMessage):
        self.client_is_waiting_for_100_continue = False
    self._respond_to_state_changes(old_states, event)
def _get_io_object(
self,
role: Type[Sentinel],
event: Optional[Event],
io_dict: Union[ReadersType, WritersType],
) -> Optional[Callable[..., Any]]:
# event may be None; it's only used when entering SEND_BODY
state = self._cstate.states[role]
if state is SEND_BODY:
# Special case: the io_dict has a dict of reader/writer factories
# that depend on the request/response framing.
framing_type, args = _body_framing(
cast(bytes, self._request_method), cast(Union[Request, Response], event)
)
return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index]
else:
# General case: the io_dict just has the appropriate reader/writer
# for this state
return io_dict.get((role, state)) # type: ignore[return-value]
# This must be called after any action that might have caused
# self._cstate.states to change.
def _respond_to_state_changes(
self,
old_states: Dict[Type[Sentinel], Type[Sentinel]],
event: Optional[Event] = None,
) -> None:
# Update reader/writer
if self.our_state != old_states[self.our_role]:
self._writer = self._get_io_object(self.our_role, event, WRITERS)
if self.their_state != old_states[self.their_role]:
self._reader = self._get_io_object(self.their_role, event, READERS)
@property
def trailing_data(self) -> Tuple[bytes, bool]:
"""Data that has been received, but not yet processed, represented as
a tuple with two elements, where the first is a byte-string containing
the unprocessed data itself, and the second is a bool that is True if
the receive connection was closed.
See :ref:`switching-protocols` for discussion of why you'd want this.
"""
return (bytes(self._receive_buffer), self._receive_buffer_closed)
def receive_data(self, data: bytes) -> None:
"""Add data to our internal receive buffer.
This does not actually do any processing on the data, just stores
it. To trigger processing, you have to call :meth:`next_event`.
Args:
data (:term:`bytes-like object`):
The new data that was just received.
Special case: If *data* is an empty byte-string like ``b""``,
then this indicates that the remote side has closed the
connection (end of file). Normally this is convenient, because
standard Python APIs like :meth:`file.read` or
:meth:`socket.recv` use ``b""`` to indicate end-of-file, while
other failures to read are indicated using other mechanisms
like raising :exc:`TimeoutError`. When using such an API you
can just blindly pass through whatever you get from ``read``
to :meth:`receive_data`, and everything will work.
But, if you have an API where reading an empty string is a
valid non-EOF condition, then you need to be aware of this and
make sure to check for such strings and avoid passing them to
:meth:`receive_data`.
Returns:
Nothing, but after calling this you should call :meth:`next_event`
to parse the newly received data.
Raises:
RuntimeError:
Raised if you pass an empty *data*, indicating EOF, and then
pass a non-empty *data*, indicating more data that somehow
arrived after the EOF.
(Calling ``receive_data(b"")`` multiple times is fine,
and equivalent to calling it once.)
"""
if data:
if self._receive_buffer_closed:
raise RuntimeError("received close, then received more data?")
self._receive_buffer += data
else:
self._receive_buffer_closed = True
def _extract_next_receive_event(
self,
) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
state = self.their_state
# We don't pause immediately when they enter DONE, because even in
# DONE state we can still process a ConnectionClosed() event. But
# if we have data in our buffer, then we definitely aren't getting
# a ConnectionClosed() immediately and we need to pause.
if state is DONE and self._receive_buffer:
return PAUSED
if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
return PAUSED
assert self._reader is not None
event = self._reader(self._receive_buffer)
if event is None:
if not self._receive_buffer and self._receive_buffer_closed:
# In some unusual cases (basically just HTTP/1.0 bodies), EOF
# triggers an actual protocol event; in that case, we want to
# return that event, and then the state will change and we'll
# get called again to generate the actual ConnectionClosed().
if hasattr(self._reader, "read_eof"):
event = self._reader.read_eof() # type: ignore[attr-defined]
else:
event = ConnectionClosed()
if event is None:
event = NEED_DATA
return event # type: ignore[no-any-return]
def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
"""Parse the next event out of our receive buffer, update our internal
state, and return it.
This is a mutating operation -- think of it like calling :func:`next`
on an iterator.
Returns:
: One of three things:
1) An event object -- see :ref:`events`.
2) The special constant :data:`NEED_DATA`, which indicates that
you need to read more data from your socket and pass it to
:meth:`receive_data` before this method will be able to return
any more events.
3) The special constant :data:`PAUSED`, which indicates that we
are not in a state where we can process incoming data (usually
because the peer has finished their part of the current
request/response cycle, and you have not yet called
:meth:`start_next_cycle`). See :ref:`flow-control` for details.
Raises:
RemoteProtocolError:
The peer has misbehaved. You should close the connection
(possibly after sending some kind of 4xx response).
Once this method returns :class:`ConnectionClosed` once, then all
subsequent calls will also return :class:`ConnectionClosed`.
If this method raises any exception besides :exc:`RemoteProtocolError`
then that's a bug -- if it happens please file a bug report!
If this method raises any exception then it also sets
:attr:`Connection.their_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
"""
if self.their_state is ERROR: | raise RemoteProtocolError("Can't receive data when peer state is ERROR") | 23 | 2023-10-23 18:09:28+00:00 | 12k |
f0uriest/quadax | tests/test_adaptive.py | [
{
"identifier": "romberg",
"path": "quadax/romberg.py",
"snippet": "def romberg(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n divmax=20,\n norm=jnp.inf,\n):\n \"\"\"Romberg integration of a callable function or method.\n\n Returns t... | import jax.numpy as jnp
import numpy as np
import pytest
import scipy
from jax.config import config as jax_config
from quadax import quadcc, quadgk, quadts, romberg, rombergts | 8,290 | class TestRombergTS:
"""Tests for tanh-sinh quadrature with adaptive refinement."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
y, info = rombergts(
prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs
)
if info.status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4)
self._base(0, 1e-8)
self._base(0, 1e-12)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4)
self._base(1, 1e-8)
self._base(1, 1e-12)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4)
self._base(2, 1e-8)
self._base(2, 1e-12)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4)
self._base(3, 1e-8)
self._base(3, 1e-12)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4)
self._base(4, 1e-8)
self._base(4, 1e-12)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4)
self._base(5, 1e-8)
self._base(5, 1e-12)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8, fudge=10)
self._base(6, 1e-12, divmax=22, fudge=1e5)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8)
self._base(7, 1e-12)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, fudge=10)
self._base(9, 1e-12, fudge=1e5)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8, fudge=10)
self._base(11, 1e-12, fudge=1e5)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
class TestRomberg:
"""Tests for Romberg's method (only for well behaved integrands)."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
| """Tests for adaptive quadrature routines."""
jax_config.update("jax_enable_x64", True)
example_problems = [
# problem 0
{"fun": lambda t: t * jnp.log(1 + t), "interval": [0, 1], "val": 1 / 4},
# problem 1
{
"fun": lambda t: t**2 * jnp.arctan(t),
"interval": [0, 1],
"val": (jnp.pi - 2 + 2 * jnp.log(2)) / 12,
},
# problem 2
{
"fun": lambda t: jnp.exp(t) * jnp.cos(t),
"interval": [0, jnp.pi / 2],
"val": (jnp.exp(jnp.pi / 2) - 1) / 2,
},
# problem 3
{
"fun": lambda t: jnp.arctan(jnp.sqrt(2 + t**2))
/ ((1 + t**2) * jnp.sqrt(2 + t**2)),
"interval": [0, 1],
"val": 5 * jnp.pi**2 / 96,
},
# problem 4
{"fun": lambda t: jnp.sqrt(t) * jnp.log(t), "interval": [0, 1], "val": -4 / 9},
# problem 5
{"fun": lambda t: jnp.sqrt(1 - t**2), "interval": [0, 1], "val": jnp.pi / 4},
# problem 6
{
"fun": lambda t: jnp.sqrt(t) / jnp.sqrt(1 - t**2),
"interval": [0, 1],
"val": 2
* jnp.sqrt(jnp.pi)
* scipy.special.gamma(3 / 4)
/ scipy.special.gamma(1 / 4),
},
# problem 7
{"fun": lambda t: jnp.log(t) ** 2, "interval": [0, 1], "val": 2},
# problem 8
{
"fun": lambda t: jnp.log(jnp.cos(t)),
"interval": [0, jnp.pi / 2],
"val": -jnp.pi * jnp.log(2) / 2,
},
# problem 9
{
"fun": lambda t: jnp.sqrt(jnp.tan(t)),
"interval": [0, jnp.pi / 2],
"val": jnp.pi * jnp.sqrt(2) / 2,
},
# problem 10
{"fun": lambda t: 1 / (1 + t**2), "interval": [0, jnp.inf], "val": jnp.pi / 2},
# problem 11
{
"fun": lambda t: jnp.exp(-t) / jnp.sqrt(t),
"interval": [0, jnp.inf],
"val": jnp.sqrt(jnp.pi),
},
# problem 12
{
"fun": lambda t: jnp.exp(-(t**2) / 2),
"interval": [-jnp.inf, jnp.inf],
"val": jnp.sqrt(2 * jnp.pi),
},
# problem 13
{"fun": lambda t: jnp.exp(-t) * jnp.cos(t), "interval": [0, jnp.inf], "val": 1 / 2},
# problem 14 - vector valued integrand made of up problems 0 and 1
{
"fun": lambda t: jnp.array([t * jnp.log(1 + t), t**2 * jnp.arctan(t)]),
"interval": [0, 1],
"val": jnp.array([1 / 4, (jnp.pi - 2 + 2 * jnp.log(2)) / 12]),
},
# problem 15 - intergral with breakpoints
{
"fun": lambda t: jnp.log((t - 1) ** 2),
"interval": [0, 1, 2],
"val": -4,
},
]
class TestQuadGK:
"""Tests for Gauss-Konrod quadrature."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
status = kwargs.pop("status", 0)
y, info = quadgk(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4, order=21)
self._base(0, 1e-8, order=21)
self._base(0, 1e-12, order=21)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4, order=31)
self._base(1, 1e-8, order=31)
self._base(1, 1e-12, order=31)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=41)
self._base(2, 1e-8, order=41)
self._base(2, 1e-12, order=41)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=51)
self._base(3, 1e-8, order=51)
self._base(3, 1e-12, order=51)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=61)
self._base(4, 1e-8, order=61)
self._base(4, 1e-12, order=61)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=21)
self._base(5, 1e-8, order=21)
self._base(5, 1e-12, order=21)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4, order=15)
self._base(6, 1e-8, 100, order=15)
self._base(6, 1e-12, 1e5, order=15, max_ninter=100, status=8)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4, order=61)
self._base(7, 1e-8, order=61)
self._base(7, 1e-12, order=61, status=4)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4, order=51)
self._base(8, 1e-8, order=51)
self._base(8, 1e-12, order=51, status=4)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4, order=15)
self._base(9, 1e-8, 100, order=15)
self._base(9, 1e-12, 1e4, order=15, max_ninter=100, status=8)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4, order=15)
self._base(10, 1e-8, order=15)
self._base(10, 1e-12, order=15)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4, order=21)
self._base(11, 1e-8, 100, order=21)
self._base(11, 1e-12, 1e4, order=21, status=8, max_ninter=100)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4, order=15)
self._base(12, 1e-8, order=15)
self._base(12, 1e-12, order=15)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4, order=31)
self._base(13, 1e-8, order=31)
self._base(13, 1e-12, order=31)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
class TestQuadCC:
"""Tests for Clenshaw-Curtis quadrature."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
status = kwargs.pop("status", 0)
y, info = quadcc(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4, order=32)
self._base(0, 1e-8, order=32)
self._base(0, 1e-12, order=32)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4, order=64)
self._base(1, 1e-8, order=64)
self._base(1, 1e-12, order=64)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=128)
self._base(2, 1e-8, order=128)
self._base(2, 1e-12, order=128)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=256)
self._base(3, 1e-8, order=256)
self._base(3, 1e-12, order=256)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=8)
self._base(4, 1e-8, order=8)
self._base(4, 1e-12, order=8, max_ninter=100)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=16)
self._base(5, 1e-8, order=16)
self._base(5, 1e-12, order=16)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8, 100)
self._base(6, 1e-12, 1e5, max_ninter=100, status=8)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8, 10)
self._base(7, 1e-12, status=8)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12, status=8)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, max_ninter=100, status=8)
self._base(9, 1e-12, 1e4, max_ninter=100, status=8)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12, 10)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8, 100)
self._base(11, 1e-12, 1e4, status=8)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
class TestQuadTS:
"""Tests for adaptive tanh-sinh quadrature."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
status = kwargs.pop("status", 0)
y, info = quadts(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4)
self._base(0, 1e-8)
self._base(0, 1e-12)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4)
self._base(1, 1e-8)
self._base(1, 1e-12)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=41)
self._base(2, 1e-8, order=41)
self._base(2, 1e-12, order=41)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=61)
self._base(3, 1e-8, order=61)
self._base(3, 1e-12, order=61)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=81)
self._base(4, 1e-8, order=81)
self._base(4, 1e-12, order=81)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=101)
self._base(5, 1e-8, order=101)
self._base(5, 1e-12, order=101)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8)
self._base(6, 1e-12, 1e4)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8)
self._base(7, 1e-12)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, 10)
self._base(9, 1e-12, 1e4)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8)
self._base(11, 1e-12, 1e4)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
class TestRombergTS:
"""Tests for tanh-sinh quadrature with adaptive refinement."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
y, info = rombergts(
prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs
)
if info.status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4)
self._base(0, 1e-8)
self._base(0, 1e-12)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4)
self._base(1, 1e-8)
self._base(1, 1e-12)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4)
self._base(2, 1e-8)
self._base(2, 1e-12)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4)
self._base(3, 1e-8)
self._base(3, 1e-12)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4)
self._base(4, 1e-8)
self._base(4, 1e-12)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4)
self._base(5, 1e-8)
self._base(5, 1e-12)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8, fudge=10)
self._base(6, 1e-12, divmax=22, fudge=1e5)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8)
self._base(7, 1e-12)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, fudge=10)
self._base(9, 1e-12, fudge=1e5)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8, fudge=10)
self._base(11, 1e-12, fudge=1e5)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
class TestRomberg:
"""Tests for Romberg's method (only for well behaved integrands)."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i] | y, info = romberg( | 0 | 2023-10-24 04:44:34+00:00 | 12k |
zju3dv/nr_in_a_room | models_neurecon/neus_multi_rendering.py | [
{
"identifier": "NeuS",
"path": "models_neurecon/neus.py",
"snippet": "class NeuS(nn.Module):\n def __init__(\n self,\n variance_init=0.05,\n speed_factor=1.0,\n input_ch=3,\n input_obj_ch=0,\n input_light_ch=0,\n input_appearance_ch=0,\n W_geo_... | import ipdb
import torch
import sys
import os
import copy
from typing import List, Dict, Any
from einops import rearrange, reduce, repeat
from models_neurecon.neus import NeuS, volume_render
from models_neurecon.base import ImplicitSurface | 9,660 | render_mask=False, # only works for NeuS
refine_edge_obj_ids=[],
# chunk=4096,
chunk=99999999, # chunk should be controlled outside
extra_dict: Dict[str, Any] = {},
render_kwargs: Dict[str, Any] = {},
):
assert len(rays_list) == len(obj_instance_ids)
if render_mask:
assert use_sphere_tracing, "render_mask only support sphere_tracing mode"
results = {}
if use_sphere_tracing:
chunk = 99999999 # sphere_tracing allows larger chunk size
else: # hit_test_only works only for sphere tracing mode
hit_test_only = False
rgbs_list = []
alphas_list = []
z_vals_list = []
for i in range(len(rays_list)):
# hack to suppress zero points
# zero_mask = z_vals[:, -1] == 0
# xyz_fine[zero_mask] = 0
obj_id = obj_instance_ids[i]
if len(refine_edge_obj_ids) > 0:
if obj_id in refine_edge_obj_ids:
refine_edge = True
else:
refine_edge = False
rays = rays_list[i]
N_rays = rays.shape[0]
obj_code = extra_dict[f"embedding_inst_{obj_id}"].view(N_rays, -1)
light_code = (
extra_dict[f"embedding_light_{obj_id}"].view(N_rays, -1)
if f"embedding_light_{obj_id}" in extra_dict
else None
)
appearance_code = (
extra_dict[f"embedding_appearance_{obj_id}"].view(N_rays, -1)
if f"embedding_appearance_{obj_id}" in extra_dict
else None
)
model = models[f"neus_{obj_id}"]
rays_o = rays[:, 0:3].view(N_rays, 3)
rays_d = rays[:, 3:6].view(N_rays, 3)
near_bypass = rays[:, 6].view(N_rays, 1)
far_bypass = rays[:, 7].view(N_rays, 1)
zero_mask = (far_bypass != 0).squeeze()
device = rays_o.device
dtype = rays_o.dtype
rays_o = rays_o[zero_mask]
rays_d = rays_d[zero_mask]
near_bypass = near_bypass[zero_mask]
far_bypass = far_bypass[zero_mask]
obj_code = obj_code[zero_mask]
light_code = None if light_code is None else light_code[zero_mask]
appearance_code = (
None if appearance_code is None else appearance_code[zero_mask]
)
if rays_o.shape[0] > 0: # if have valid rays to render
if use_sphere_tracing:
render_res = sphere_tracing_rendering(
model=model,
rays_o=rays_o,
rays_d=rays_d,
near=near_bypass,
far=far_bypass,
obj_code=obj_code,
light_code=light_code,
appearance_code=appearance_code,
hit_test_only=hit_test_only,
need_normal=need_normal,
refine_edge=False
if obj_id == 0
else refine_edge, # do not refine edge for background
chunk=chunk,
)
z_vals = render_res["z_vals"]
alphas = render_res["alphas"]
if not hit_test_only:
rgbs = render_res["rgbs"]
if need_normal:
results[f"normals_{obj_id}"] = render_res["normals"]
else:
if (
safe_region_volume_rendering
): # we first use sphere tracing to get the exact distance
with torch.no_grad():
# acceletate with sphere tracing
render_res_sphere = sphere_tracing_rendering(
model=model,
rays_o=rays_o,
rays_d=rays_d,
near=near_bypass,
far=far_bypass,
obj_code=obj_code,
light_code=light_code,
appearance_code=appearance_code,
refine_edge=False,
hit_test_only=True,
need_normal=need_normal,
chunk=chunk,
)
# get exact depth to the surface
depth = render_res_sphere["z_vals"].view(-1, 1)
# set near/far near the surface
near_bypass = torch.clamp_min(depth - 0.1, 0.0)
far_bypass = torch.clamp_min(depth + 0.05, 0.0)
render_kwargs = copy.deepcopy(render_kwargs)
# with the correct surface, we can render with little sampling points
render_kwargs["N_samples"] = 8
render_kwargs["N_importance"] = 16
|
sys.path.append(os.getcwd()) # noqa
def volume_rendering_multi_neus(
results,
typ,
z_vals_list,
rgbs_list,
alphas_list,
noise_std,
white_back,
obj_ids_list=None,
):
N_objs = len(z_vals_list)
# order via z_vals
z_vals = torch.cat(z_vals_list, 1) # (N_rays, N_samples*N_objs)
rgbs = torch.cat(rgbs_list, 1) # (N_rays, N_samples*N_objs, 3)
alphas = torch.cat(alphas_list, 1) # (N_rays, N_samples*N_objs)
z_vals, idx_sorted = torch.sort(z_vals, -1)
for i in range(3):
rgbs[:, :, i] = torch.gather(rgbs[:, :, i].clone(), dim=1, index=idx_sorted)
alphas = torch.gather(alphas, dim=1, index=idx_sorted)
# record object ids for recovering weights of each object after sorting
if obj_ids_list != None:
obj_ids = torch.cat(obj_ids_list, -1)
results[f"obj_ids_{typ}"] = torch.gather(obj_ids, dim=1, index=idx_sorted)
alphas_shifted = torch.cat(
[torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1
) # [1, 1-a1, 1-a2, ...]
weights = alphas * torch.cumprod(alphas_shifted[:, :-1], -1) # (N_rays, N_samples_)
weights_sum = reduce(
weights, "n1 n2 -> n1", "sum"
) # (N_rays), the accumulated opacity along the rays
# equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
# results[f"weights_{typ}"] = weights
results[f"opacity_{typ}"] = weights_sum
# results[f"z_vals_{typ}"] = z_vals
rgb_map = reduce(
rearrange(weights, "n1 n2 -> n1 n2 1") * rgbs, "n1 n2 c -> n1 c", "sum"
)
depth_map = reduce(weights * z_vals, "n1 n2 -> n1", "sum")
if white_back:
rgb_map = rgb_map + 1 - weights_sum.unsqueeze(-1)
results[f"rgb_{typ}"] = rgb_map
results[f"depth_{typ}"] = depth_map
# adopt from neurecon/ray_casting.py
def sphere_tracing_surface_points(
implicit_surface: ImplicitSurface,
rays_o: torch.Tensor,
rays_d: torch.Tensor,
# function config
obj_code: torch.Tensor,
near: torch.Tensor,
far: torch.Tensor,
# algorithm config
# stop_sdf_th: float = 0.0,
# N_iters: int = 20,
N_iters: int = 50,
near_surface_th: float = 0.0,
sdf_eps: float = 5e-3,
):
"""
rays_o, rays_d: torch.Tensor [N_rays, 3]
obj_code: torch.Tensor [N_rays, N_channel]
near: torch.Tensor [N_rays]
far: torch.Tensor [N_rays]
near_surface_th: also set the output mask to false when hit point not near the surface
"""
device = rays_o.device
if isinstance(near, float):
d_preds = torch.ones([*rays_o.shape[:-1]], device=device) * near
else:
d_preds = near
mask = torch.ones_like(d_preds, dtype=torch.bool, device=device)
N_rays = d_preds.shape[0]
for _ in range(N_iters):
pts = rays_o + rays_d * d_preds[..., :, None]
surface_val = implicit_surface.forward(pts, obj_code)
# surface_val = surface_val - stop_sdf_th
# d_preds[mask] += surface_val[mask]
d_preds = d_preds + surface_val * mask.float()
mask[d_preds > far] = False
mask[d_preds < 0] = False
# mark unfinished
mask_unfinish = surface_val.abs() > sdf_eps
mask_unfinish[~mask] = False
if mask_unfinish.sum() == 0:
# print(_)
break
pts = rays_o + rays_d * d_preds[..., :, None]
if near_surface_th != 0:
mask = torch.logical_and(mask, surface_val.abs() < near_surface_th)
return d_preds, pts, mask, surface_val
def sphere_tracing_rendering(
model: NeuS,
rays_o: torch.Tensor,
rays_d: torch.Tensor,
near: torch.Tensor,
far: torch.Tensor,
obj_code: torch.Tensor,
light_code: torch.Tensor,
appearance_code: torch.Tensor,
hit_test_only: bool,
need_normal: bool,
refine_edge: bool,
chunk: int,
):
d_pred_chunk = []
rgb_chunk = []
normal_chunk = []
alpha_chunk = []
# pt_pred_chunk = []
# mask_chunk = []
B = rays_o.shape[0]
for i in range(0, B, chunk):
d_pred, pt_pred, mask, last_sdf = sphere_tracing_surface_points(
implicit_surface=model.implicit_surface,
rays_o=rays_o[i : i + chunk],
rays_d=rays_d[i : i + chunk],
near=near[i : i + chunk].squeeze(1),
far=far[i : i + chunk].squeeze(1),
obj_code=obj_code[i : i + chunk],
near_surface_th=0.05 if hit_test_only else 0,
)
d_pred_chunk += [d_pred]
alpha = torch.zeros_like(d_pred)
alpha[mask] = 1
alpha_chunk += [alpha]
if not hit_test_only:
rgb, sdf, nablas = model.forward(
pt_pred,
obj_code[i : i + chunk],
None if light_code is None else light_code[i : i + chunk],
rays_d[i : i + chunk],
None if appearance_code is None else appearance_code[i : i + chunk],
)
rgb_chunk += [rgb]
if need_normal or refine_edge:
_, normal, _ = model.implicit_surface.forward_with_nablas(
pt_pred, obj_code[i : i + chunk]
)
normal_chunk += [normal]
if refine_edge:
# compute cos_angle of hit ray and surface normal
# for edges near to the perpendicular, we dim the alpha
normal_reg = torch.nn.functional.normalize(normal, dim=1)
cos_angle = -(rays_d[i : i + chunk] * normal_reg).sum(-1)
# do not affect other visible part that far from perpendicular
mask_merged = torch.logical_and(mask, cos_angle < 0)
alpha[mask_merged] = 0 # just set to 0 is enough
# alpha[mask] = torch.relu(torch.tanh(cos_angle[mask] * 2))
alpha_chunk[-1] = alpha
d_pred = torch.cat(d_pred_chunk, 0)
alpha = torch.cat(alpha_chunk, 0)
ret_res = {
"alphas": alpha.unsqueeze(1),
"z_vals": d_pred.unsqueeze(1),
}
if not hit_test_only:
ret_res["rgbs"] = torch.cat(rgb_chunk, 0).unsqueeze(1)
if need_normal:
ret_res["normals"] = torch.cat(normal_chunk, 0).unsqueeze(1)
return ret_res
def render_rays_multi_neus(
room_optimizer,
models: Dict[str, NeuS],
rays_list: List[torch.Tensor],
obj_instance_ids: List[int],
noise_std=0,
white_back=False,
use_sphere_tracing=True,
refine_edge=False,
safe_region_volume_rendering=True,
hit_test_only=False, # only works for NeuS
need_normal=False, # only works for NeuS
render_mask=False, # only works for NeuS
refine_edge_obj_ids=[],
# chunk=4096,
chunk=99999999, # chunk should be controlled outside
extra_dict: Dict[str, Any] = {},
render_kwargs: Dict[str, Any] = {},
):
assert len(rays_list) == len(obj_instance_ids)
if render_mask:
assert use_sphere_tracing, "render_mask only support sphere_tracing mode"
results = {}
if use_sphere_tracing:
chunk = 99999999 # sphere_tracing allows larger chunk size
else: # hit_test_only works only for sphere tracing mode
hit_test_only = False
rgbs_list = []
alphas_list = []
z_vals_list = []
for i in range(len(rays_list)):
# hack to suppress zero points
# zero_mask = z_vals[:, -1] == 0
# xyz_fine[zero_mask] = 0
obj_id = obj_instance_ids[i]
if len(refine_edge_obj_ids) > 0:
if obj_id in refine_edge_obj_ids:
refine_edge = True
else:
refine_edge = False
rays = rays_list[i]
N_rays = rays.shape[0]
obj_code = extra_dict[f"embedding_inst_{obj_id}"].view(N_rays, -1)
light_code = (
extra_dict[f"embedding_light_{obj_id}"].view(N_rays, -1)
if f"embedding_light_{obj_id}" in extra_dict
else None
)
appearance_code = (
extra_dict[f"embedding_appearance_{obj_id}"].view(N_rays, -1)
if f"embedding_appearance_{obj_id}" in extra_dict
else None
)
model = models[f"neus_{obj_id}"]
rays_o = rays[:, 0:3].view(N_rays, 3)
rays_d = rays[:, 3:6].view(N_rays, 3)
near_bypass = rays[:, 6].view(N_rays, 1)
far_bypass = rays[:, 7].view(N_rays, 1)
zero_mask = (far_bypass != 0).squeeze()
device = rays_o.device
dtype = rays_o.dtype
rays_o = rays_o[zero_mask]
rays_d = rays_d[zero_mask]
near_bypass = near_bypass[zero_mask]
far_bypass = far_bypass[zero_mask]
obj_code = obj_code[zero_mask]
light_code = None if light_code is None else light_code[zero_mask]
appearance_code = (
None if appearance_code is None else appearance_code[zero_mask]
)
if rays_o.shape[0] > 0: # if have valid rays to render
if use_sphere_tracing:
render_res = sphere_tracing_rendering(
model=model,
rays_o=rays_o,
rays_d=rays_d,
near=near_bypass,
far=far_bypass,
obj_code=obj_code,
light_code=light_code,
appearance_code=appearance_code,
hit_test_only=hit_test_only,
need_normal=need_normal,
refine_edge=False
if obj_id == 0
else refine_edge, # do not refine edge for background
chunk=chunk,
)
z_vals = render_res["z_vals"]
alphas = render_res["alphas"]
if not hit_test_only:
rgbs = render_res["rgbs"]
if need_normal:
results[f"normals_{obj_id}"] = render_res["normals"]
else:
if (
safe_region_volume_rendering
): # we first use sphere tracing to get the exact distance
with torch.no_grad():
# acceletate with sphere tracing
render_res_sphere = sphere_tracing_rendering(
model=model,
rays_o=rays_o,
rays_d=rays_d,
near=near_bypass,
far=far_bypass,
obj_code=obj_code,
light_code=light_code,
appearance_code=appearance_code,
refine_edge=False,
hit_test_only=True,
need_normal=need_normal,
chunk=chunk,
)
# get exact depth to the surface
depth = render_res_sphere["z_vals"].view(-1, 1)
# set near/far near the surface
near_bypass = torch.clamp_min(depth - 0.1, 0.0)
far_bypass = torch.clamp_min(depth + 0.05, 0.0)
render_kwargs = copy.deepcopy(render_kwargs)
# with the correct surface, we can render with little sampling points
render_kwargs["N_samples"] = 8
render_kwargs["N_importance"] = 16 | render_res = volume_render( | 1 | 2023-10-15 08:41:29+00:00 | 12k |
chenxn2020/GOSE | GOSEfinetune/models/layoutlmv2/modeling_layoutlmv2.py | [
{
"identifier": "ReOutput",
"path": "GOSEfinetune/utils.py",
"snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = No... | import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import detectron2
from torch import nn
from torch.nn import CrossEntropyLoss
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
from ...modules.decoders.gose import GOSE | 10,215 | )
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class LayoutLMv2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Config class `from_pretrained` uses to build this model's configuration.
    config_class = LayoutLMv2Config
    pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
    # Prefix stripped/added when loading base weights into a task-head model.
    base_model_prefix = "layoutlmv2"
    # `position_ids` is a registered buffer recreated at init, so its absence
    # from a checkpoint is expected and should not be reported as missing.
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding token's embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, LayoutLMv2LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
def my_convert_sync_batchnorm(module, process_group=None):
    # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
    """Recursively replace batch-norm layers in ``module`` with ``torch.nn.SyncBatchNorm``.

    Unlike the stock converter, this also handles
    ``detectron2.layers.FrozenBatchNorm2d`` (used by the detectron2 visual
    backbone), copying its affine parameters and running statistics into a
    trainable ``SyncBatchNorm``.

    Args:
        module: the (sub)module tree to convert; the original is deleted.
        process_group: optional distributed process group forwarded to
            ``SyncBatchNorm``.

    Returns:
        The converted module tree.
    """
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        # Plain BatchNorm variants: defer to the official converter.
        return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
    module_output = module
    if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
        module_output = torch.nn.SyncBatchNorm(
            num_features=module.num_features,
            eps=module.eps,
            affine=True,
            track_running_stats=True,
            process_group=process_group,
        )
        # Carry over the frozen affine parameters and running statistics.
        module_output.weight = torch.nn.Parameter(module.weight)
        module_output.bias = torch.nn.Parameter(module.bias)
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        # Restart the batch counter: statistics are treated as fresh.
        module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
    for name, child in module.named_children():
        # Recurse into children, re-attaching converted versions in place.
        module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
    del module
    return module_output
class VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = detectron2.config.get_cfg()
| # coding=utf-8
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlmv2-base-uncased",
"layoutlmv2-large-uncased",
]
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super(LayoutLMv2Embeddings, self).__init__()
        # Standard BERT-style token / 1D-position / segment embeddings.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # 2D layout embeddings: x coordinates (left/right edges) share one
        # table, y coordinates (top/bottom edges) share another.
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # Default position ids [0 .. max_position_embeddings-1], registered as
        # a buffer so they move with the module across devices.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def _cal_spatial_position_embeddings(self, bbox):
        """Embed bounding boxes ``bbox[:, :, 0:4]`` = (x0, y0, x1, y1).

        Returns the concatenation along the last dim of the four edge
        embeddings plus height and width embeddings.

        Raises:
            IndexError: if a coordinate indexes outside the embedding tables
                (values are expected within the 0-1000 range).
        """
        try:
            left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
            upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
            right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
            lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e

        h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
        w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])

        spatial_position_embeddings = torch.cat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            dim=-1,
        )

        return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
    """Multi-head self-attention with optional 1D/2D relative attention biases.

    With ``config.fast_qkv`` a single fused linear projection produces
    query/key/value; Q and V get separate learned bias parameters while K
    intentionally has none (the fused linear is bias-free).
    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.fast_qkv = config.fast_qkv
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if config.fast_qkv:
            # Fused projection; Q/V biases are added manually in compute_qkv.
            self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
            self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
        else:
            self.query = nn.Linear(config.hidden_size, self.all_head_size)
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # Split the last dim into heads: (B, L, H*D) -> (B, H, L, D).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def compute_qkv(self, hidden_states):
        """Project hidden states to query/key/value tensors of shape (B, L, H*D)."""
        if self.fast_qkv:
            qkv = self.qkv_linear(hidden_states)
            q, k, v = torch.chunk(qkv, 3, dim=-1)
            if q.ndimension() == self.q_bias.ndimension():
                q = q + self.q_bias
                v = v + self.v_bias
            else:
                # Reshape the stored (1, 1, H*D) biases so they broadcast
                # against inputs of a different rank.
                _sz = (1,) * (q.ndimension() - 1) + (-1,)
                q = q + self.q_bias.view(*_sz)
                v = v + self.v_bias.view(*_sz)
        else:
            q = self.query(hidden_states)
            k = self.key(hidden_states)
            v = self.value(hidden_states)
        return q, k, v

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        # NOTE: head_mask, encoder_hidden_states, encoder_attention_mask and
        # past_key_value are accepted for interface parity with the caller but
        # are not used by this implementation.
        q, k, v = self.compute_qkv(hidden_states)

        # (B, L, H*D) -> (B, H, L, D)
        query_layer = self.transpose_for_scores(q)
        key_layer = self.transpose_for_scores(k)
        value_layer = self.transpose_for_scores(v)

        # Pre-scale the queries instead of the scores (same result).
        query_layer = query_layer / math.sqrt(self.attention_head_size)
        # [BSZ, NAT, L, L]
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.has_relative_attention_bias:
            attention_scores += rel_pos
        if self.has_spatial_attention_bias:
            attention_scores += rel_2d_pos
        # attention_mask is truthy at positions to *mask out*: those scores are
        # set to -inf (in float32, to keep the softmax numerically stable).
        attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
        # Softmax in float32, then cast back to the value dtype (e.g. fp16).
        attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        # (B, H, L, D) -> (B, L, H*D): merge heads back into the hidden dim.
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class LayoutLMv2Attention(nn.Module):
    """Attention block: `LayoutLMv2SelfAttention` followed by the residual
    output projection (`LayoutLMv2SelfOutput`), with head-pruning support."""

    def __init__(self, config):
        super().__init__()
        self.self = LayoutLMv2SelfAttention(config)
        self.output = LayoutLMv2SelfOutput(config)
        # Heads pruned so far; needed to compute index offsets on re-pruning.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from the Q/K/V and output projections.

        NOTE(review): this dereferences `self.self.query/key/value`, which are
        only created when `config.fast_qkv` is False; pruning a fused-QKV
        attention module would fail with an AttributeError.
        """
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        # Residual projection around the raw attention context.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class LayoutLMv2Layer(nn.Module):
    """One transformer layer: self-attention, optional cross-attention (decoder
    mode only), and a chunked feed-forward block.

    Output tuple layout: (layer_output, [self-attn probs], [cross-attn probs],
    [present_key_value]) — the bracketed entries appear only when
    `output_attentions` / decoder mode are active.
    """

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunk the feed-forward over the sequence (dim 1) to save memory.
        self.seq_len_dim = 1
        self.attention = LayoutLMv2Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = LayoutLMv2Attention(config)
        self.intermediate = LayoutLMv2Intermediate(config)
        self.output = LayoutLMv2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Apply the feed-forward block in sequence chunks to bound peak memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate (expansion + activation) then output (projection +
        # residual/LayerNorm) applied to one chunk of the sequence.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Bucket signed relative positions into ``num_buckets`` indices (T5 scheme).

    Nearby offsets each get their own bucket; farther offsets share
    logarithmically sized buckets, and everything at or beyond ``max_distance``
    lands in the last bucket. With ``bidirectional=True`` half of the buckets
    encode positive offsets and half negative ones; otherwise only the
    non-positive direction is distinguished.

    Args:
        relative_position: integer tensor of position deltas.
        bidirectional: whether positive and negative offsets get disjoint buckets.
        num_buckets: total number of buckets to map into.
        max_distance: distance at which bucketing saturates.

    Returns:
        ``torch.long`` tensor of bucket indices, same shape as the input.
    """
    if bidirectional:
        effective_buckets = num_buckets // 2
        # Reserve the upper half of the bucket range for positive offsets.
        offset = (relative_position > 0).long() * effective_buckets
        distance = torch.abs(relative_position)
    else:
        effective_buckets = num_buckets
        offset = torch.zeros_like(relative_position)
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # The first half of the remaining range is exact: one bucket per offset.
    max_exact = effective_buckets // 2
    # Larger distances are spaced logarithmically up to max_distance, then clamped.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (effective_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(log_bucket, torch.full_like(log_bucket, effective_buckets - 1))
    return offset + torch.where(distance < max_exact, distance, log_bucket)
class LayoutLMv2Encoder(nn.Module):
    """Stack of `LayoutLMv2Layer`s with optional shared relative attention biases.

    When enabled in the config, two bias tensors of shape
    (batch, num_heads, seq_len, seq_len) are computed once per forward pass
    and passed to every layer:
      - ``rel_pos``: bucketed 1D token-index differences
        (``config.has_relative_attention_bias``);
      - ``rel_2d_pos``: bucketed x/y bounding-box coordinate differences
        (``config.has_spatial_attention_bias``).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if self.has_relative_attention_bias:
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_onehot_size = config.rel_pos_bins
            # Maps a one-hot bucket id to one bias scalar per attention head.
            self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)

        if self.has_spatial_attention_bias:
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
            self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
            self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)

    def _cal_1d_pos_emb(self, hidden_states, position_ids):
        """Compute the (batch, heads, seq, seq) 1D relative-position bias."""
        rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)

        rel_pos = relative_position_bucket(
            rel_pos_mat,
            num_buckets=self.rel_pos_bins,
            max_distance=self.max_rel_pos,
        )
        # One-hot followed by a bias-free Linear acts as a differentiable
        # embedding lookup for the bucket ids.
        rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
        rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        rel_pos = rel_pos.contiguous()
        return rel_pos

    def _cal_2d_pos_emb(self, hidden_states, bbox):
        """Compute the (batch, heads, seq, seq) 2D spatial bias from bounding boxes."""
        position_coord_x = bbox[:, :, 0]
        position_coord_y = bbox[:, :, 3]
        rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
        rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
        rel_pos_x = relative_position_bucket(
            rel_pos_x_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_y = relative_position_bucket(
            rel_pos_y_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
        rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
        rel_pos_x = rel_pos_x.contiguous()
        rel_pos_y = rel_pos_y.contiguous()
        # The x and y biases are additive.
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        bbox=None,
        position_ids=None,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        # The relative biases are identical for every layer: compute them once.
        rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                if use_cache:
                    # `logger.warn` is a deprecated alias of `warning`.
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # Bug fix: `torch.utils.checkpoint.checkpoint` does not
                        # forward arbitrary keyword arguments to the wrapped
                        # function (the reentrant variant rejects them), so
                        # `rel_pos`/`rel_2d_pos` must be captured in this
                        # closure instead of being passed to `checkpoint`.
                        return module(
                            *inputs, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos
                        )

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class LayoutLMv2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LayoutLMv2Config
    pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
    base_model_prefix = "layoutlmv2"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, LayoutLMv2LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Plain normal init, slightly different from the TF original,
            # which uses truncated_normal
            # (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
            elif isinstance(module, nn.Embedding) and module.padding_idx is not None:
                # Keep the padding token's embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
def my_convert_sync_batchnorm(module, process_group=None):
    """Recursively replace batch-norm layers with ``torch.nn.SyncBatchNorm``.

    Same as ``nn.modules.SyncBatchNorm.convert_sync_batchnorm`` but also
    handles ``detectron2.layers.FrozenBatchNorm2d``, which the stock converter
    does not recognize; the original module tree is consumed.
    """
    # Plain BatchNorm variants: defer to the official converter.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
    converted = module
    if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
        # Rebuild as a trainable SyncBatchNorm, carrying over the frozen
        # affine parameters and running statistics.
        converted = torch.nn.SyncBatchNorm(
            num_features=module.num_features,
            eps=module.eps,
            affine=True,
            track_running_stats=True,
            process_group=process_group,
        )
        converted.weight = torch.nn.Parameter(module.weight)
        converted.bias = torch.nn.Parameter(module.bias)
        converted.running_mean = module.running_mean
        converted.running_var = module.running_var
        # Restart the batch counter: statistics are treated as fresh.
        converted.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
    # Convert children in place on the (possibly new) parent module.
    for child_name, child in module.named_children():
        converted.add_module(child_name, my_convert_sync_batchnorm(child, process_group))
    del module
    return converted
class VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = detectron2.config.get_cfg() | add_layoutlmv2_config(self.cfg) | 2 | 2023-10-19 14:36:32+00:00 | 12k |
mklissa/dceo | dopamine/discrete_domains/run_experiment.py | [
{
"identifier": "dqn_agent",
"path": "dopamine/agents/dqn/dqn_agent.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = atari_lib.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = atari_lib.NATURE_DQN_DTYPE\nNATURE_DQN_STACK_SIZE = atari_lib.NATURE_DQN_STACK_SIZE\ndef linearly_decaying_epsilon(decay_period, ... | import os
import sys
import time
import gin.tf
import numpy as np
import tensorflow as tf
from absl import logging
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.implicit_quantile import implicit_quantile_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import checkpointer
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import logger
from dopamine.jax.agents.dqn import dqn_agent as jax_dqn_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_dceo
from dopamine.jax.agents.implicit_quantile import implicit_quantile_agent as jax_implicit_quantile_agent
from dopamine.jax.agents.quantile import quantile_agent as jax_quantile_agent
from dopamine.jax.agents.rainbow import rainbow_agent as jax_rainbow_agent
from dopamine.metrics import collector_dispatcher
from dopamine.metrics import statistics_instance | 7,612 | max_steps_per_episode=27000,
clip_rewards=True,
use_legacy_logger=True,
fine_grained_print_to_console=True):
"""Initialize the Runner object in charge of running a full experiment.
Args:
base_dir: str, the base directory to host all required sub-directories.
create_agent_fn: A function that takes as args a Tensorflow session and an
environment, and returns an agent.
create_environment_fn: A function which receives a problem name and
creates a Gym environment for that problem (e.g. an Atari 2600 game).
checkpoint_file_prefix: str, the prefix to use for checkpoint files.
logging_file_prefix: str, prefix to use for the log files.
log_every_n: int, the frequency for writing logs.
num_iterations: int, the iteration number threshold (must be greater than
start_iteration).
training_steps: int, the number of training steps to perform.
evaluation_steps: int, the number of evaluation steps to perform.
max_steps_per_episode: int, maximum number of steps after which an episode
terminates.
clip_rewards: bool, whether to clip rewards in [-1, 1].
use_legacy_logger: bool, whether to use the legacy Logger. This will be
deprecated soon, replaced with the new CollectorDispatcher setup.
fine_grained_print_to_console: bool, whether to print fine-grained
progress to console (useful for debugging).
This constructor will take the following actions:
- Initialize an environment.
- Initialize a `tf.compat.v1.Session`.
- Initialize a logger.
- Initialize an agent.
- Reload from the latest checkpoint, if available, and initialize the
Checkpointer object.
"""
assert base_dir is not None
self._legacy_logger_enabled = use_legacy_logger
self._fine_grained_print_to_console_enabled = fine_grained_print_to_console
self._logging_file_prefix = logging_file_prefix
self._log_every_n = log_every_n
self._num_iterations = num_iterations
self._training_steps = training_steps
self._evaluation_steps = evaluation_steps
self._max_steps_per_episode = max_steps_per_episode
self._base_dir = base_dir
self._clip_rewards = clip_rewards
self._create_directories()
self._environment = create_environment_fn()
# The agent is now in charge of setting up the session.
self._sess = None
# We're using a bit of a hack in that we pass in _base_dir instead of an
# actually SummaryWriter. This is because the agent is now in charge of the
# session, but needs to create the SummaryWriter before creating the ops,
# and in order to do so, it requires the base directory.
self._agent = create_agent_fn(self._sess, self._environment,
summary_writer=self._base_dir)
if hasattr(self._agent, '_sess'):
self._sess = self._agent._sess
self._summary_writer = self._agent.summary_writer
self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix)
# Create a collector dispatcher for metrics reporting.
self._collector_dispatcher = collector_dispatcher.CollectorDispatcher(
self._base_dir)
set_collector_dispatcher_fn = getattr(
self._agent, 'set_collector_dispatcher', None)
if callable(set_collector_dispatcher_fn):
set_collector_dispatcher_fn(self._collector_dispatcher)
@property
def _use_legacy_logger(self):
if not hasattr(self, '_legacy_logger_enabled'):
return True
return self._legacy_logger_enabled
@property
def _has_collector_dispatcher(self):
if not hasattr(self, '_collector_dispatcher'):
return False
return True
@property
def _fine_grained_print_to_console(self):
if not hasattr(self, '_fine_grained_print_to_console_enabled'):
return True
return self._fine_grained_print_to_console_enabled
def _create_directories(self):
"""Create necessary sub-directories."""
self._checkpoint_dir = os.path.join(self._base_dir, 'checkpoints')
if self._use_legacy_logger:
logging.warning(
'DEPRECATION WARNING: Logger is being deprecated. '
'Please switch to CollectorDispatcher!')
self._logger = logger.Logger(os.path.join(self._base_dir, 'logs'))
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
"""Reloads the latest checkpoint if it exists.
This method will first create a `Checkpointer` object and then call
`checkpointer.get_latest_checkpoint_number` to determine if there is a valid
checkpoint in self._checkpoint_dir, and what the largest file number is.
If a valid checkpoint file is found, it will load the bundled data from this
file and will pass it to the agent for it to reload its data.
If the agent is able to successfully unbundle, this method will verify that
the unbundled data contains the keys,'logs' and 'current_iteration'. It will
then load the `Logger`'s data from the bundle, and will return the iteration
number keyed by 'current_iteration' as one of the return values (along with
the `Checkpointer` object).
Args:
checkpoint_file_prefix: str, the checkpoint file prefix.
Returns:
start_iteration: int, the iteration number to start the experiment from.
experiment_checkpointer: `Checkpointer` object for the experiment.
"""
| # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining classes and helper methods for general agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def load_gin_configs(gin_files, gin_bindings):
  """Parse gin configuration files and apply extra parameter bindings.

  Args:
    gin_files: list, paths to the gin configuration files for this
      experiment.
    gin_bindings: list, gin parameter bindings that override the values
      from the config files.
  """
  # skip_unknown=False: references to unknown configurables are treated as
  # errors instead of being silently skipped.
  gin.parse_config_files_and_bindings(
      gin_files, bindings=gin_bindings, skip_unknown=False)
@gin.configurable
def create_agent(sess, environment, agent_name=None, summary_writer=None,
                 debug_mode=False):
  """Builds the RL agent named by `agent_name`.

  Args:
    sess: A `tf.compat.v1.Session` object for running associated ops.
    environment: A gym environment (e.g. Atari 2600).
    agent_name: str, name of the agent to create.
    summary_writer: A Tensorflow summary writer to pass to the agent
      for in-agent training statistics in Tensorboard.
    debug_mode: bool, whether to output Tensorboard summaries. If set to true,
      the agent will output in-episode statistics to Tensorboard. Disabled by
      default as this results in slower training.

  Returns:
    agent: An RL agent.

  Raises:
    ValueError: If `agent_name` is not in the supported list.
  """
  assert agent_name is not None
  if not debug_mode:
    # Outside debug mode, in-agent Tensorboard summaries are suppressed.
    summary_writer = None
  # Any name with the 'dqn' prefix selects the TF DQN agent.
  if agent_name.startswith('dqn'):
    return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n,
                              summary_writer=summary_writer)
  # Lazy factories: the agent class for a name is only resolved and
  # constructed when that name is actually requested.
  factories = {
      'rainbow': lambda: rainbow_agent.RainbowAgent(
          sess, num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'implicit_quantile': lambda: implicit_quantile_agent.ImplicitQuantileAgent(
          sess, num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'jax_dqn': lambda: jax_dqn_agent.JaxDQNAgent(
          num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'jax_quantile': lambda: jax_quantile_agent.JaxQuantileAgent(
          num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'jax_rainbow': lambda: jax_rainbow_agent.JaxRainbowAgent(
          num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'full_rainbow': lambda: full_rainbow_agent.JaxFullRainbowAgent(
          num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'full_rainbow_dceo': lambda: full_rainbow_dceo.JaxFullRainbowAgentDCEO(
          num_actions=environment.action_space.n,
          summary_writer=summary_writer),
      'jax_implicit_quantile': lambda:
          jax_implicit_quantile_agent.JaxImplicitQuantileAgent(
              num_actions=environment.action_space.n,
              summary_writer=summary_writer),
  }
  make_agent = factories.get(agent_name)
  if make_agent is None:
    raise ValueError('Unknown agent: {}'.format(agent_name))
  return make_agent()
@gin.configurable
def create_runner(base_dir, schedule='continuous_train_and_eval'):
  """Creates an experiment Runner for the requested schedule.

  Args:
    base_dir: str, base directory for hosting all subdirectories.
    schedule: str, which type of Runner to use.

  Returns:
    runner: A `Runner`-like object.

  Raises:
    ValueError: When an unknown schedule is encountered.
  """
  assert base_dir is not None
  # 'continuous_train_and_eval' interleaves training and evaluation each
  # iteration; 'continuous_train' only trains. Both stop at num_iterations.
  runner_by_schedule = {
      'continuous_train_and_eval': Runner,
      'continuous_train': TrainRunner,
  }
  runner_cls = runner_by_schedule.get(schedule)
  if runner_cls is None:
    raise ValueError('Unknown schedule: {}'.format(schedule))
  return runner_cls(base_dir, create_agent)
@gin.configurable
class Runner(object):
"""Object that handles running Dopamine experiments.
Here we use the term 'experiment' to mean simulating interactions between the
agent and the environment and reporting some statistics pertaining to these
interactions.
A simple scenario to train a DQN agent is as follows:
```python
import dopamine.discrete_domains.atari_lib
base_dir = '/tmp/simple_example'
def create_agent(sess, environment):
return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n)
runner = Runner(base_dir, create_agent, atari_lib.create_atari_environment)
runner.run()
```
"""
  def __init__(self,
               base_dir,
               create_agent_fn,
               create_environment_fn=atari_lib.create_atari_environment,
               checkpoint_file_prefix='ckpt',
               logging_file_prefix='log',
               log_every_n=1,
               num_iterations=200,
               training_steps=250000,
               evaluation_steps=125000,
               max_steps_per_episode=27000,
               clip_rewards=True,
               use_legacy_logger=True,
               fine_grained_print_to_console=True):
    """Initialize the Runner object in charge of running a full experiment.

    Args:
      base_dir: str, the base directory to host all required sub-directories.
      create_agent_fn: A function that takes as args a Tensorflow session and an
        environment, and returns an agent.
      create_environment_fn: A function which receives a problem name and
        creates a Gym environment for that problem (e.g. an Atari 2600 game).
      checkpoint_file_prefix: str, the prefix to use for checkpoint files.
      logging_file_prefix: str, prefix to use for the log files.
      log_every_n: int, the frequency for writing logs.
      num_iterations: int, the iteration number threshold (must be greater than
        start_iteration).
      training_steps: int, the number of training steps to perform.
      evaluation_steps: int, the number of evaluation steps to perform.
      max_steps_per_episode: int, maximum number of steps after which an episode
        terminates.
      clip_rewards: bool, whether to clip rewards in [-1, 1].
      use_legacy_logger: bool, whether to use the legacy Logger. This will be
        deprecated soon, replaced with the new CollectorDispatcher setup.
      fine_grained_print_to_console: bool, whether to print fine-grained
        progress to console (useful for debugging).

    This constructor will take the following actions:
    - Initialize an environment.
    - Initialize a `tf.compat.v1.Session`.
    - Initialize a logger.
    - Initialize an agent.
    - Reload from the latest checkpoint, if available, and initialize the
      Checkpointer object.
    """
    assert base_dir is not None
    # The *_enabled attributes back the `_use_legacy_logger` /
    # `_fine_grained_print_to_console` properties, which fall back to True
    # when the attributes are missing (older restored instances).
    self._legacy_logger_enabled = use_legacy_logger
    self._fine_grained_print_to_console_enabled = fine_grained_print_to_console
    self._logging_file_prefix = logging_file_prefix
    self._log_every_n = log_every_n
    self._num_iterations = num_iterations
    self._training_steps = training_steps
    self._evaluation_steps = evaluation_steps
    self._max_steps_per_episode = max_steps_per_episode
    self._base_dir = base_dir
    self._clip_rewards = clip_rewards
    # _create_directories reads _base_dir and the legacy-logger flag set above.
    self._create_directories()
    self._environment = create_environment_fn()
    # The agent is now in charge of setting up the session.
    self._sess = None
    # We're using a bit of a hack in that we pass in _base_dir instead of an
    # actual SummaryWriter. This is because the agent is now in charge of the
    # session, but needs to create the SummaryWriter before creating the ops,
    # and in order to do so, it requires the base directory.
    self._agent = create_agent_fn(self._sess, self._environment,
                                  summary_writer=self._base_dir)
    # Agents that manage their own session expose it as `_sess`; adopt it so
    # the Runner and the agent share a single session.
    if hasattr(self._agent, '_sess'):
      self._sess = self._agent._sess
    self._summary_writer = self._agent.summary_writer
    # Must run after the agent exists: checkpoint restore hands the bundled
    # state back to the agent.
    self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix)
    # Create a collector dispatcher for metrics reporting.
    self._collector_dispatcher = collector_dispatcher.CollectorDispatcher(
        self._base_dir)
    # Wire the dispatcher into the agent only when the agent supports it.
    set_collector_dispatcher_fn = getattr(
        self._agent, 'set_collector_dispatcher', None)
    if callable(set_collector_dispatcher_fn):
      set_collector_dispatcher_fn(self._collector_dispatcher)
@property
def _use_legacy_logger(self):
if not hasattr(self, '_legacy_logger_enabled'):
return True
return self._legacy_logger_enabled
@property
def _has_collector_dispatcher(self):
if not hasattr(self, '_collector_dispatcher'):
return False
return True
@property
def _fine_grained_print_to_console(self):
if not hasattr(self, '_fine_grained_print_to_console_enabled'):
return True
return self._fine_grained_print_to_console_enabled
def _create_directories(self):
"""Create necessary sub-directories."""
self._checkpoint_dir = os.path.join(self._base_dir, 'checkpoints')
if self._use_legacy_logger:
logging.warning(
'DEPRECATION WARNING: Logger is being deprecated. '
'Please switch to CollectorDispatcher!')
self._logger = logger.Logger(os.path.join(self._base_dir, 'logs'))
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
"""Reloads the latest checkpoint if it exists.
This method will first create a `Checkpointer` object and then call
`checkpointer.get_latest_checkpoint_number` to determine if there is a valid
checkpoint in self._checkpoint_dir, and what the largest file number is.
If a valid checkpoint file is found, it will load the bundled data from this
file and will pass it to the agent for it to reload its data.
If the agent is able to successfully unbundle, this method will verify that
the unbundled data contains the keys,'logs' and 'current_iteration'. It will
then load the `Logger`'s data from the bundle, and will return the iteration
number keyed by 'current_iteration' as one of the return values (along with
the `Checkpointer` object).
Args:
checkpoint_file_prefix: str, the checkpoint file prefix.
Returns:
start_iteration: int, the iteration number to start the experiment from.
experiment_checkpointer: `Checkpointer` object for the experiment.
""" | self._checkpointer = checkpointer.Checkpointer(self._checkpoint_dir, | 4 | 2023-10-15 22:14:16+00:00 | 12k |
LeoQLi/NeuralGF | train_test.py | [
{
"identifier": "Network",
"path": "network.py",
"snippet": "class Network(nn.Module):\n def __init__(self, num_points, num_knn):\n super(Network, self).__init__()\n self.num_points = num_points\n self.num_knn = num_knn\n self.num_iter = 2\n\n self.net = MLPNet_line... | import os, sys
import argparse
import time
import math
import numpy as np
import torch
import torch.utils.data
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import scipy.spatial as spatial
import torch.multiprocessing as mp
from network import Network
from datasets import BaseDataset
from mesh import extract_mesh
from misc import seed_all, get_log, get_logger, creat_logger, knn_gather_np | 7,245 | ### reorder and normalize the vectors, eliminate zero values
pred_norm = np.zeros_like(grad_norm)
pred_norm[rand_idxs, :] = grad_norm
pred_norm[np.linalg.norm(pred_norm, axis=-1) == 0.0] = 1.0
pred_norm /= np.linalg.norm(pred_norm, axis=-1, keepdims=True)
elapsed_time = time.time() - start_time
time_sum += elapsed_time
assert pcl_raw.shape == pred_norm.shape
if args.avg_nor:
# k_idex = []
ptree = spatial.cKDTree(pcl_raw)
_, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3)
if k_idex.ndim == 1:
k_idex = k_idex[:, None]
pred_norm = knn_gather_np(pred_norm, k_idex)
pred_norm = pred_norm.mean(axis=1)
if args.save_normal_npy or args.save_normal_xyz:
normal_dir = os.path.join(output_dir, 'pred_normal')
os.makedirs(normal_dir, exist_ok=True)
path_save = os.path.join(normal_dir, shape_name)
if args.save_normal_npy:
np.save(path_save + '_normal.npy', pred_norm)
if args.save_normal_xyz:
pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1)
# k = 1000; n = 50 # 10
# pc_nor = pc_nor[n*k:n*k+k, :]
np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f')
### evaluation
nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang = np.rad2deg(np.arccos(np.abs(nn)))
rms = np.sqrt(np.mean(np.square(ang)))
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
### if more than half of points have wrong orientation, then flip all normals
if p90 < 50.0:
nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
rms_o = np.sqrt(np.mean(np.square(ang_o)))
list_rms.append(rms)
list_rms_o.append(rms_o)
list_p90.append(p90)
if np.mean(p90) < 90.0:
list_bad[shape_name] = p90
logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name))
if args.save_mesh:
mesh_dir = os.path.join(output_dir, 'recon_mesh')
os.makedirs(mesh_dir, exist_ok=True)
mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max,
points_gt=pcl_raw, mesh_far=args.mesh_far)
mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name))
if len(list_p90) > 0:
logger.info('Time: %.2f sec\n' % time_sum)
logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90)))
ss = ''
for k, v in list_bad.items():
ss += '%s: %.3f %%\n' % (k, v)
logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss))
return 1
### Arguments
args = parse_arguments()
if len(args.testset_list) == 0:
args.testset_list = 'testset_' + args.data_set
if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']:
args.lr = 0.00001
args.dis_k = 64
if args.data_set in ['PCPNet']:
args.dis_k = 25
# args.lr = 0.0007
eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
'testset_vardensity_striped', 'testset_vardensity_gradient']
if args.data_set in ['FamousShape']:
args.dis_k = 50
args.lr = 0.002
eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
'testset_density_stripe', 'testset_density_gradient']
if args.data_set == 'FamousShape5k':
args.num_points = 1000
args.dis_k = 10
if args.data_set == 'WireframePC':
args.max_iter = 10000
args.save_inter = 2500
args.num_points = 300
args.dis_k = 3
args.warn_up = 2000
# args.lr = 0.0001
if args.data_set == 'NestPC':
args.dis_k = 50
# args.num_knn = 6
args.lr = 0.0001
torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')
|
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--mode', type=str, default='')
parser.add_argument('--log_root', type=str, default='./log')
parser.add_argument('--data_set', type=str, default='',
choices=['PCPNet', 'FamousShape', 'FamousShape5k', 'SceneNN', 'Others', 'KITTI_sub', 'Semantic3D', '3DScene', 'WireframePC', 'NestPC', 'Plane'])
### Train
parser.add_argument('--seed', type=int, default=2023)
parser.add_argument('--tag', type=str, default=None)
parser.add_argument('--logging', type=eval, default=True, choices=[True, False])
parser.add_argument('--max_iter', type=int, default=20000)
parser.add_argument('--save_inter', type=int, default=10000)
parser.add_argument('--warn_up', type=int, default=10000)
parser.add_argument('--lr', type=float, default=0.001)
### Dataset and loader
parser.add_argument('--dataset_root', type=str, default='/data1/lq/Dataset/')
parser.add_argument('--testset_list', type=str, default='')
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=6)
parser.add_argument('--num_points', type=int, default=5000)
parser.add_argument('--num_query', type=int, default=10)
parser.add_argument('--num_knn', type=int, default=64)
parser.add_argument('--dis_k', type=int, default=50)
parser.add_argument('--dis_scale', type=float, default=0.15)
### Test
parser.add_argument('--ckpt_dir', type=str, default='')
parser.add_argument('--ckpt_iter', type=int, default=None)
parser.add_argument('--save_normal_npy', type=eval, default=False, choices=[True, False])
parser.add_argument('--save_normal_xyz', type=eval, default=False, choices=[True, False])
parser.add_argument('--save_mesh', type=eval, default=False, choices=[True, False])
parser.add_argument('--avg_nor', type=eval, default=False, choices=[True, False])
parser.add_argument('--mesh_far', type=float, default=-1.0)
args = parser.parse_args()
return args
def update_learning_rate(optimizer, iter_step, init_lr, max_iter):
warn_up = args.warn_up # 2000, 10000
lr = (iter_step / warn_up) if iter_step < warn_up else 0.5 * (math.cos((iter_step - warn_up)/(max_iter - warn_up) * math.pi) + 1)
lr = lr * init_lr
for g in optimizer.param_groups:
g['lr'] = lr
def train(data_list, log_dir, log_name, ckpt_dir, id=None):
### Dataset
train_set = BaseDataset(root=args.dataset_root,
data_set=args.data_set,
data_list=data_list,
num_points=args.num_points,
num_query=args.num_query,
num_knn=args.num_knn,
dis_k=args.dis_k,
dis_scale=args.dis_scale,
)
dataloader = torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=True, # faster speed
)
log_flag = True
num_shapes = len(train_set.cur_sets)
for shape_idx, shape_name in enumerate(train_set.cur_sets):
### Model
my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).train()
optimizer = optim.Adam(my_model.parameters(), lr=args.lr)
train_set.process_data(shape_name)
iter_dataloader = iter(dataloader)
if log_flag:
log_name = 'train(%s)(%d)' % (log_name, os.getpid())
if id is not None:
log_name = log_name + '-%d' % id
logger = get_logger(args, log_dir, log_name, file_name='log_'+data_list, model=my_model)
log_flag = False
time_sum = 0
for iter_i in range(1, args.max_iter+1):
update_learning_rate(optimizer, iter_i, init_lr=args.lr, max_iter=args.max_iter)
data = iter_dataloader.next()
start_time = time.time()
pcl_raw = data['pcl_raw'].to(_device) # (B, M, 3), M > N
pcl_source = data['pcl_source'].to(_device) # (B, N, 3)
knn_idx = data['knn_idx'].to(_device) # (B, N, K)
pcl_raw_sub = data['pcl_raw_sub'].to(_device) if 'pcl_raw_sub' in data else None # (B, N, 3)
### Reset gradient and model state
my_model.train()
optimizer.zero_grad()
pcl_source = torch.cat([pcl_source, pcl_raw_sub], dim=-2)
grad_norm = my_model(pcl_source)
loss, loss_tuple = my_model.get_loss(pcl_raw=pcl_raw, pcl_source=pcl_source, knn_idx=knn_idx)
### Backward and optimize
loss.backward()
optimizer.step()
elapsed_time = time.time() - start_time
time_sum += elapsed_time
if iter_i % (args.save_inter//10) == 0:
ss = ''
for l in loss_tuple:
ss += '%.6f+' % l.item()
logger.info('shape:%d/%d, iter:%d/%d, loss=%.6f(%s), lr=%.6f' % (
shape_idx+1, num_shapes, iter_i, args.max_iter, loss, ss[:-1], optimizer.param_groups[0]['lr']))
if iter_i % args.save_inter == 0 or iter_i == args.max_iter:
model_filename = os.path.join(ckpt_dir, shape_name + '_%d.pt' % iter_i)
torch.save(my_model.state_dict(), model_filename)
logger.info('Save model: ' + model_filename)
# pc_nor = torch.cat([pcl_source, grad_norm], dim=-1)[0].cpu().detach().numpy()
# np.savetxt(model_filename[:-3] + '.txt', pc_nor, fmt='%.6f')
del my_model, optimizer
logger.info('Time: %.2f sec\n' % time_sum)
return 1
def test(data_list):
ckpt_paths = os.path.join(args.log_root, args.ckpt_dir, 'ckpts/*.pt')
assert len(ckpt_paths) > 0
### Dataset
test_set = BaseDataset(root=args.dataset_root,
data_set=args.data_set,
data_list=data_list,
)
### Model
print('Building model ...')
my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).eval()
### Log
PID = os.getpid()
output_dir = os.path.join(args.log_root, args.ckpt_dir, 'test_%s' % args.ckpt_iter)
os.makedirs(output_dir, exist_ok=True)
logger = creat_logger('test(%d)(%s-%s)' % (PID, args.ckpt_dir, args.ckpt_iter), output_dir)
logger.info('Command: {}'.format(' '.join(sys.argv)))
trainable_num = sum(p.numel() for p in my_model.parameters() if p.requires_grad)
logger.info('Num_params_trainable: %d' % trainable_num)
max_n = int(2e5)
list_bad = {}
list_rms = []
list_rms_o = []
list_p90 = []
time_sum = 0
for shape_idx, shape_name in enumerate(test_set.cur_sets):
### load the trained model
ckpt_path = os.path.join(args.log_root, args.ckpt_dir, 'ckpts/%s_%s.pt' % (shape_name, args.ckpt_iter))
if not os.path.exists(ckpt_path):
logger.info('File not exist: ' + ckpt_path)
continue
my_model.load_state_dict(torch.load(ckpt_path, map_location=_device), strict=False)
### load a point cloud and shuffle the order of points
pcl_raw, nor_gt = test_set.get_data(shape_name) # (N, 3)
start_time = time.time()
num_point = pcl_raw.shape[0]
rand_idxs = np.random.choice(num_point, num_point, replace=False)
pcl = pcl_raw[rand_idxs, :3]
### if there are too many points, the point cloud will be processed in batches,
### the number of output vectors may be less than the number of initial points (decided by remainder).
if num_point <= max_n:
pcl_source = torch.from_numpy(pcl).float().to(_device)
with torch.no_grad():
grad_norm = my_model(pcl_source)
grad_norm = grad_norm.cpu().detach().numpy()
else:
k = math.ceil(num_point / max_n)
remainder = int(max_n * k % num_point)
print('Split data: ', num_point, k, remainder)
pcl_new = np.concatenate((pcl, pcl[:remainder]), axis=0)
pcl_source = torch.from_numpy(pcl_new).float() # (max_n*k, D)
grad_norm = np.zeros((pcl_new.shape[0], 3)) # (N, 3)
with torch.no_grad():
for i in range(k):
grad_norm_s = my_model(pcl_source[max_n*i:max_n*(i+1)].to(_device))
grad_norm[max_n*i:max_n*(i+1)] = grad_norm_s.cpu().detach().numpy()
grad_norm = grad_norm[:max_n*k-remainder]
### reorder and normalize the vectors, eliminate zero values
pred_norm = np.zeros_like(grad_norm)
pred_norm[rand_idxs, :] = grad_norm
pred_norm[np.linalg.norm(pred_norm, axis=-1) == 0.0] = 1.0
pred_norm /= np.linalg.norm(pred_norm, axis=-1, keepdims=True)
elapsed_time = time.time() - start_time
time_sum += elapsed_time
assert pcl_raw.shape == pred_norm.shape
if args.avg_nor:
# k_idex = []
ptree = spatial.cKDTree(pcl_raw)
_, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3)
if k_idex.ndim == 1:
k_idex = k_idex[:, None]
pred_norm = knn_gather_np(pred_norm, k_idex)
pred_norm = pred_norm.mean(axis=1)
if args.save_normal_npy or args.save_normal_xyz:
normal_dir = os.path.join(output_dir, 'pred_normal')
os.makedirs(normal_dir, exist_ok=True)
path_save = os.path.join(normal_dir, shape_name)
if args.save_normal_npy:
np.save(path_save + '_normal.npy', pred_norm)
if args.save_normal_xyz:
pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1)
# k = 1000; n = 50 # 10
# pc_nor = pc_nor[n*k:n*k+k, :]
np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f')
### evaluation
nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang = np.rad2deg(np.arccos(np.abs(nn)))
rms = np.sqrt(np.mean(np.square(ang)))
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
### if more than half of points have wrong orientation, then flip all normals
if p90 < 50.0:
nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
rms_o = np.sqrt(np.mean(np.square(ang_o)))
list_rms.append(rms)
list_rms_o.append(rms_o)
list_p90.append(p90)
if np.mean(p90) < 90.0:
list_bad[shape_name] = p90
logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name))
if args.save_mesh:
mesh_dir = os.path.join(output_dir, 'recon_mesh')
os.makedirs(mesh_dir, exist_ok=True)
mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max,
points_gt=pcl_raw, mesh_far=args.mesh_far)
mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name))
if len(list_p90) > 0:
logger.info('Time: %.2f sec\n' % time_sum)
logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90)))
ss = ''
for k, v in list_bad.items():
ss += '%s: %.3f %%\n' % (k, v)
logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss))
return 1
### Arguments
args = parse_arguments()
if len(args.testset_list) == 0:
args.testset_list = 'testset_' + args.data_set
if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']:
args.lr = 0.00001
args.dis_k = 64
if args.data_set in ['PCPNet']:
args.dis_k = 25
# args.lr = 0.0007
eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
'testset_vardensity_striped', 'testset_vardensity_gradient']
if args.data_set in ['FamousShape']:
args.dis_k = 50
args.lr = 0.002
eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
'testset_density_stripe', 'testset_density_gradient']
if args.data_set == 'FamousShape5k':
args.num_points = 1000
args.dis_k = 10
if args.data_set == 'WireframePC':
args.max_iter = 10000
args.save_inter = 2500
args.num_points = 300
args.dis_k = 3
args.warn_up = 2000
# args.lr = 0.0001
if args.data_set == 'NestPC':
args.dis_k = 50
# args.num_knn = 6
args.lr = 0.0001
torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')
| seed_all(args.seed) | 3 | 2023-10-22 08:51:50+00:00 | 12k |
BurgerBurgerBurger/AA | run.py | [
{
"identifier": "add_args",
"path": "args.py",
"snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n... | import argparse
import os
import numpy as np
import torch
import ujson as json
import pandas as pd
import pickle
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from args import add_args
from model import DocREModel
from utils import set_seed, collate_fn, create_directory
from prepro import read_docred
from evaluation import to_official, official_evaluate, merge_results
from tqdm import tqdm | 9,652 |
def load_input(batch, device, tag="dev"):
input = {'input_ids': batch[0].to(device),
'attention_mask': batch[1].to(device),
'labels': batch[2].to(device),
'entity_pos': batch[3],
'hts': batch[4],
'sent_pos': batch[5],
'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
'graph': batch[8],
'tag': tag
}
return input
def train(args, model, train_features, dev_features):
def finetune(features, optimizer, num_epoch, num_steps):
best_score = -1
|
def load_input(batch, device, tag="dev"):
input = {'input_ids': batch[0].to(device),
'attention_mask': batch[1].to(device),
'labels': batch[2].to(device),
'entity_pos': batch[3],
'hts': batch[4],
'sent_pos': batch[5],
'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
'graph': batch[8],
'tag': tag
}
return input
def train(args, model, train_features, dev_features):
def finetune(features, optimizer, num_epoch, num_steps):
best_score = -1 | train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, | 3 | 2023-10-20 05:53:25+00:00 | 12k |
xingchenshanyao/YOLOP-E | lib/dataset/AutoDriveDataset.py | [
{
"identifier": "xyxy2xywh",
"path": "lib/utils/utils.py",
"snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x cente... | import os
import cv2
import numpy as np
import random
import torch
import torchvision.transforms as transforms
from pathlib import Path
from PIL import Image
from torch.utils.data import Dataset
from ..utils import letterbox, augment_hsv, random_perspective, xyxy2xywh, cutout | 8,630 | if random.random() > p : # Pending Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Pending Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Guidance Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Guidance Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
data["label"] = data_label
# cv2.imshow("img",img)
# cv2.waitKey(10000)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# cv2.imshow("img",img) # 图像颜色空间转换
# cv2.waitKey(10000)
# seg_label = cv2.imread(data["mask"], 0)
if self.cfg.num_seg_class == 3:
seg_label = cv2.imread(data["mask"])
else:
seg_label = cv2.imread(data["mask"], 0)
lane_label = cv2.imread(data["lane"], 0)
#print(lane_label.shape)
# print(seg_label.shape)
# print(lane_label.shape)
# print(seg_label.shape)
resized_shape = self.inputsize
if isinstance(resized_shape, list):
resized_shape = max(resized_shape)
h0, w0 = img.shape[:2] # orig hw
r = resized_shape / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
# cv2.imshow("img",img) # 图像缩小到640*360
# cv2.waitKey(10000)
seg_label = cv2.resize(seg_label, (int(w0 * r), int(h0 * r)), interpolation=interp)
lane_label = cv2.resize(lane_label, (int(w0 * r), int(h0 * r)), interpolation=interp)
h, w = img.shape[:2]
(img, seg_label, lane_label), ratio, pad = letterbox((img, seg_label, lane_label), resized_shape, auto=True, scaleup=self.is_train)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# ratio = (w / w0, h / h0)
# print(resized_shape)
det_label = data["label"]
labels=[]
if det_label.size > 0:
# Normalized xywh to pixel xyxy format
labels = det_label.copy()
labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (det_label[:, 2] + det_label[:, 4] / 2) + pad[1]
if self.is_train:
combination = (img, seg_label, lane_label)
(img, seg_label, lane_label), labels = random_perspective(
combination=combination,
targets=labels,
degrees=self.cfg.DATASET.ROT_FACTOR,
translate=self.cfg.DATASET.TRANSLATE,
scale=self.cfg.DATASET.SCALE_FACTOR,
shear=self.cfg.DATASET.SHEAR
)
#print(labels.shape)
| # np.set_printoptions(threshold=np.inf)
# from visualization import plot_img_and_mask,plot_one_box,show_seg_result
# # ACE
# import os
# import math
# import matplotlib.pyplot as plt
# #线性拉伸处理
# #去掉最大最小0.5%的像素值 线性拉伸至[0,1]
# def stretchImage(data, s=0.005, bins = 2000):
# ht = np.histogram(data, bins)
# d = np.cumsum(ht[0])/float(data.size)
# lmin = 0; lmax=bins-1
# while lmin<bins:
# if d[lmin]>=s:
# break
# lmin+=1
# while lmax>=0:
# if d[lmax]<=1-s:
# break
# lmax-=1
# return np.clip((data-ht[1][lmin])/(ht[1][lmax]-ht[1][lmin]), 0,1)
# #根据半径计算权重参数矩阵
# g_para = {}
# def getPara(radius = 5):
# global g_para
# m = g_para.get(radius, None)
# if m is not None:
# return m
# size = radius*2+1
# m = np.zeros((size, size))
# for h in range(-radius, radius+1):
# for w in range(-radius, radius+1):
# if h==0 and w==0:
# continue
# m[radius+h, radius+w] = 1.0/math.sqrt(h**2+w**2)
# m /= m.sum()
# g_para[radius] = m
# return m
# #常规的ACE实现
# def zmIce(I, ratio=4, radius=300):
# para = getPara(radius)
# height,width = I.shape
# zh = []
# zw = []
# n = 0
# while n < radius:
# zh.append(0)
# zw.append(0)
# n += 1
# for n in range(height):
# zh.append(n)
# for n in range(width):
# zw.append(n)
# n = 0
# while n < radius:
# zh.append(height-1)
# zw.append(width-1)
# n += 1
# #print(zh)
# #print(zw)
# Z = I[np.ix_(zh, zw)]
# res = np.zeros(I.shape)
# for h in range(radius*2+1):
# for w in range(radius*2+1):
# if para[h][w] == 0:
# continue
# res += (para[h][w] * np.clip((I-Z[h:h+height, w:w+width])*ratio, -1, 1))
# return res
# #单通道ACE快速增强实现
# def zmIceFast(I, ratio, radius):
# # print(I)
# height, width = I.shape[:2]
# if min(height, width) <=2:
# return np.zeros(I.shape)+0.5
# Rs = cv2.resize(I, (int((width+1)/2), int((height+1)/2)))
# Rf = zmIceFast(Rs, ratio, radius) #递归调用
# Rf = cv2.resize(Rf, (width, height))
# Rs = cv2.resize(Rs, (width, height))
# return Rf+zmIce(I,ratio, radius)-zmIce(Rs,ratio,radius)
# #rgb三通道分别增强 ratio是对比度增强因子 radius是卷积模板半径
# def zmIceColor(I, ratio=4, radius=3):
# res = np.zeros(I.shape)
# for k in range(3):
# res[:,:,k] = stretchImage(zmIceFast(I[:,:,k], ratio, radius))
# return res
class AutoDriveDataset(Dataset):
"""
A general Dataset for some common function
"""
def __init__(self, cfg, is_train, inputsize=640, transform=None):
"""
initial all the characteristic
Inputs:
-cfg: configurations
-is_train(bool): whether train set or not
-transform: ToTensor and Normalize
Returns:
None
"""
self.is_train = is_train
self.cfg = cfg
self.transform = transform
self.inputsize = inputsize
self.Tensor = transforms.ToTensor()
img_root = Path(cfg.DATASET.DATAROOT)
label_root = Path(cfg.DATASET.LABELROOT)
mask_root = Path(cfg.DATASET.MASKROOT)
lane_root = Path(cfg.DATASET.LANEROOT)
if is_train:
indicator = cfg.DATASET.TRAIN_SET
else:
indicator = cfg.DATASET.TEST_SET
self.img_root = img_root / indicator
self.label_root = label_root / indicator
self.mask_root = mask_root / indicator
self.lane_root = lane_root / indicator
# self.label_list = self.label_root.iterdir()
self.mask_list = self.mask_root.iterdir()
self.db = []
self.data_format = cfg.DATASET.DATA_FORMAT
self.scale_factor = cfg.DATASET.SCALE_FACTOR
self.rotation_factor = cfg.DATASET.ROT_FACTOR
self.flip = cfg.DATASET.FLIP
self.color_rgb = cfg.DATASET.COLOR_RGB
# self.target_type = cfg.MODEL.TARGET_TYPE
self.shapes = np.array(cfg.DATASET.ORG_IMG_SIZE)
def _get_db(self):
"""
finished on children Dataset(for dataset which is not in Bdd100k format, rewrite children Dataset)
"""
raise NotImplementedError
def evaluate(self, cfg, preds, output_dir):
"""
finished on children dataset
"""
raise NotImplementedError
def __len__(self,):
"""
number of objects in the dataset
"""
return len(self.db)
def __getitem__(self, idx):
"""
Get input and groud-truth from database & add data augmentation on input
Inputs:
-idx: the index of image in self.db(database)(list)
self.db(list) [a,b,c,...]
a: (dictionary){'image':, 'information':}
Returns:
-image: transformed image, first passed the data augmentation in __getitem__ function(type:numpy), then apply self.transform
-target: ground truth(det_gt,seg_gt)
function maybe useful
cv2.imread
cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
cv2.warpAffine
"""
data = self.db[idx]
data_label = data["label"]
id_image = int(data["image"].split('/')[-1][:-4]) # 获取图片序号
img = cv2.imread(data["image"], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# cv2.imshow("img",img) # 原图像
# cv2.waitKey(5000)
# print("img = zmIceColor(img/255.0)*255")
# img = zmIceColor(img/255.0)*255
# cv2.imshow("img",img/255) # ACE自动色彩均衡快速算法
# cv2.waitKey(5000)
# Only Mascio Enhancement 数据增强
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1, y1, x2, y2 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
random.seed(idx)
if self.is_train and int(idx_0) == 9 and random.random() > 1: # 只增强Straight or Right Turn Arrow
# if self.is_train:
# if True:
c_y = 10 # 偏移间隙
c_x = 0
x_c_new = x_c+c_x
y_c_new = y_c+h_c+c_y
x1_new, y1_new, x2_new, y2_new = x1+c_x, y1+h_c+c_y, x2+c_x, y2+h_c+c_y
if (x1_new >=0 and x2_new <=1280 and y1_new>=0 and y2_new <=720):
# 向下重叠一次
Is_add = True
for line0 in data_label:
x1_0, y1_0, x2_0, y2_0 = line0[1]*1280-line0[3]*1280/2, line0[2]*1280-line0[4]*720/2, line0[1]*1280+line0[3]*1280/2, line0[2]*1280+line0[4]*720/2
if (x1_new>x1_0 and y1_new>y1_0 and x1_new<x2_0 and y1_new<y2_0) or (x2_new>x1_0 and y2_new>y1_0 and x2_new<x2_0 and y2_new<y2_0) or (x1_new<x1_0 and y1_new<y1_0 and x2_new>x2_0 and y2_new>y2_0):
Is_add = False
break
if Is_add:
try:
cropped_line = [[idx_0, x_c_new, y_c_new, w_c, h_c]]
data_label = np.append(data_label, cropped_line, axis=0)
img[int(y1_new):int(y2_new), int(x1_new):int(x2_new)] = img[int(y1):int(y2), int(x1):int(x2)]
except:
Is_add = True
# cv2.imshow("img",img)
# cv2.waitKey(10000)
# Specific Mascio Enhancement数据增强
cropped_path0 = '/home/xingchen/Study/dataset/SDExpressway/traffic_object_cropped/'
f=open('/home/xingchen/Study/dataset/SDExpressway/traffic_object_cropped.txt','r')
lines=f.readlines()
f.close()
c_c = 10
p = 0.8 # 数据增强概率
# Only_day = True
Only_day = False #只加强白天的图片
# if self.is_train: # 限定只有训练的时候增强
# if True:
if False:
random.seed(idx)
if random.random() > p-0.1 : # Straight or Right Turn Arrow增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Straight or Right Turn Arrow/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[int(y1):int(y2), int(x1):int(x2)] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p-0.1 : # Straight Ahead Arrow增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Straight Ahead Arrow/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[int(y1):int(y2), int(x1):int(x2)] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Speed Limit Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Speed Limit Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Emergency Telephone Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Emergency Telephone Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Warning Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Warning Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Directional Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Directional Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Pending Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Pending Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
if random.random() > p : # Guidance Sign增强
Is_add = True
if id_image >= 3294 and Only_day: # 只加强白天的图片
Is_add = False
cropped_path = cropped_path0+'Guidance Sign/'
fileList = os.listdir(cropped_path)
cropped_id = random.randint(0,len(fileList)-1)
txt_id = int(fileList[cropped_id].split('_')[0])
txt_line = lines[txt_id-1].split(' ')
x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])
if x1>x2:
x1,x2 = x2,x1
if y1>y2:
y1,y2 = y2,y1
for line in data_label:
idx_0 = line[0]
x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)
x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)
if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):
Is_add = False
break
if Is_add:
try:
cropped = cv2.imread(cropped_path+fileList[cropped_id])
img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]
data_label = np.append(data_label, cropped_line, axis=0)
except:
Is_add = True
data["label"] = data_label
# cv2.imshow("img",img)
# cv2.waitKey(10000)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# cv2.imshow("img",img) # 图像颜色空间转换
# cv2.waitKey(10000)
# seg_label = cv2.imread(data["mask"], 0)
if self.cfg.num_seg_class == 3:
seg_label = cv2.imread(data["mask"])
else:
seg_label = cv2.imread(data["mask"], 0)
lane_label = cv2.imread(data["lane"], 0)
#print(lane_label.shape)
# print(seg_label.shape)
# print(lane_label.shape)
# print(seg_label.shape)
resized_shape = self.inputsize
if isinstance(resized_shape, list):
resized_shape = max(resized_shape)
h0, w0 = img.shape[:2] # orig hw
r = resized_shape / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
# cv2.imshow("img",img) # 图像缩小到640*360
# cv2.waitKey(10000)
seg_label = cv2.resize(seg_label, (int(w0 * r), int(h0 * r)), interpolation=interp)
lane_label = cv2.resize(lane_label, (int(w0 * r), int(h0 * r)), interpolation=interp)
h, w = img.shape[:2]
(img, seg_label, lane_label), ratio, pad = letterbox((img, seg_label, lane_label), resized_shape, auto=True, scaleup=self.is_train)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# ratio = (w / w0, h / h0)
# print(resized_shape)
det_label = data["label"]
labels=[]
if det_label.size > 0:
# Normalized xywh to pixel xyxy format
labels = det_label.copy()
labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (det_label[:, 2] + det_label[:, 4] / 2) + pad[1]
if self.is_train:
combination = (img, seg_label, lane_label)
(img, seg_label, lane_label), labels = random_perspective(
combination=combination,
targets=labels,
degrees=self.cfg.DATASET.ROT_FACTOR,
translate=self.cfg.DATASET.TRANSLATE,
scale=self.cfg.DATASET.SCALE_FACTOR,
shear=self.cfg.DATASET.SHEAR
)
#print(labels.shape) | augment_hsv(img, hgain=self.cfg.DATASET.HSV_H, sgain=self.cfg.DATASET.HSV_S, vgain=self.cfg.DATASET.HSV_V) | 1 | 2023-10-24 02:08:25+00:00 | 12k |
giulio98/functional-diffusion-processes | src/functional_diffusion_processes/losses/mse_loss.py | [
{
"identifier": "BaseMAML",
"path": "src/functional_diffusion_processes/models/base_maml.py",
"snippet": "class BaseMAML(nn.Module, abc.ABC):\n \"\"\"Abstract model class for implementing Model-Agnostic Meta-Learning (MAML).\n\n The Model-Agnostic Meta-Learning (MAML) algorithm is designed to trai... | import abc
import jax
import jax.numpy as jnp
from functools import partial
from typing import Any, Callable, TypeVar, Union
from flax.core import FrozenDict
from jax.random import PRNGKeyArray
from omegaconf import DictConfig
from ..models import BaseMAML, BaseViT
from ..sdetools import SDE
from ..utils.common import batch_mul | 8,996 |
Params = FrozenDict[str, Any]
T = TypeVar("T")
class MSELoss(abc.ABC):
"""Abstract class for computing Mean Squared Error (MSE) Loss.
Provides a structure for constructing a loss function to compute the MSE
loss between model predictions and real data, with potential modifications
for different domains (frequency or normal) and scheduling.
Attributes:
sde (SDE): An instance of stochastic differential equation to be used to calculate the weight factor in loss computation.
loss_config (DictConfig): A configuration object holding parameters for loss computation.
"""
|
Params = FrozenDict[str, Any]
T = TypeVar("T")
class MSELoss(abc.ABC):
"""Abstract class for computing Mean Squared Error (MSE) Loss.
Provides a structure for constructing a loss function to compute the MSE
loss between model predictions and real data, with potential modifications
for different domains (frequency or normal) and scheduling.
Attributes:
sde (SDE): An instance of stochastic differential equation to be used to calculate the weight factor in loss computation.
loss_config (DictConfig): A configuration object holding parameters for loss computation.
"""
| def __init__(self, sde: SDE, loss_config: DictConfig) -> None: | 2 | 2023-10-24 22:01:35+00:00 | 12k |
R1999RC-official/Reverse1999ResonanceCalculator | python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py | [
{
"identifier": "Candidate",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py",
"snippet": "class Candidate:\n @property\n def project_name(self) -> NormalizedName:\n \"\"\"The \"project name\" of the candidate.\n\n This is different from ``nam... | import contextlib
import functools
import logging
import os
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
from pip._vendor.resolvelib import Resolver as RLResolver
from pip._vendor.resolvelib.structs import DirectedGraph
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.constructors import install_req_extend_extras
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
from pip._internal.resolution.resolvelib.provider import PipProvider
from pip._internal.resolution.resolvelib.reporter import (
PipDebuggingReporter,
PipReporter,
)
from pip._internal.utils.packaging import get_requirement
from .base import Candidate, Requirement
from .factory import Factory
from pip._vendor.resolvelib.resolvers import Result as RLResult | 7,464 |
if TYPE_CHECKING:
Result = RLResult[Requirement, Candidate, str]
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer: RequirementPreparer,
finder: PackageFinder,
wheel_cache: Optional[WheelCache],
make_install_req: InstallRequirementProvider,
use_user_site: bool,
ignore_dependencies: bool,
ignore_installed: bool,
ignore_requires_python: bool,
force_reinstall: bool,
upgrade_strategy: str,
py_version_info: Optional[Tuple[int, ...]] = None,
):
super().__init__()
assert upgrade_strategy in self._allowed_strategies
|
if TYPE_CHECKING:
Result = RLResult[Requirement, Candidate, str]
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer: RequirementPreparer,
finder: PackageFinder,
wheel_cache: Optional[WheelCache],
make_install_req: InstallRequirementProvider,
use_user_site: bool,
ignore_dependencies: bool,
ignore_installed: bool,
ignore_requires_python: bool,
force_reinstall: bool,
upgrade_strategy: str,
py_version_info: Optional[Tuple[int, ...]] = None,
):
super().__init__()
assert upgrade_strategy in self._allowed_strategies
| self.factory = Factory( | 2 | 2023-10-24 06:48:58+00:00 | 12k |
KosinskiLab/pyTME | tme/tests/test_matching_exhaustive.py | [
{
"identifier": "scan",
"path": "tme/matching_exhaustive.py",
"snippet": "@device_memory_handler\ndef scan(\n matching_data: MatchingData,\n matching_setup: Callable,\n matching_score: Callable,\n n_jobs: int = 4,\n callback_class: CallbackClass = None,\n callback_class_args: Dict = {}... | import numpy as np
import pytest
from tme.matching_exhaustive import (
scan,
scan_subsets,
MATCHING_EXHAUSTIVE_REGISTER,
register_matching_exhaustive,
)
from tme.matching_data import MatchingData
from tme.matching_utils import get_rotation_matrices
from tme.matching_memory import MATCHING_MEMORY_REGISTRY | 7,738 |
class TestMatchExhaustive:
def setup_method(self):
target = np.zeros((50, 50, 50))
target[20:30, 30:40, 12:17] = 1
self.target = target
template = np.zeros((50, 50, 50))
template[15:25, 20:30, 2:7] = 1
self.template = template
self.rotations = get_rotation_matrices(60)[0:2,]
def teardown_method(self):
self.target = None
self.template = None
self.coordinates = None
self.coordinates_weights = None
self.rotations = None
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(matching_data=matching_data, matching_setup=setup, matching_score=process)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
n_jobs=2,
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core_both(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 2),
)
def test_register_matching_exhaustive(self):
setup, matching = MATCHING_EXHAUSTIVE_REGISTER[
list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0]
]
|
class TestMatchExhaustive:
def setup_method(self):
target = np.zeros((50, 50, 50))
target[20:30, 30:40, 12:17] = 1
self.target = target
template = np.zeros((50, 50, 50))
template[15:25, 20:30, 2:7] = 1
self.template = template
self.rotations = get_rotation_matrices(60)[0:2,]
def teardown_method(self):
self.target = None
self.template = None
self.coordinates = None
self.coordinates_weights = None
self.rotations = None
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(matching_data=matching_data, matching_setup=setup, matching_score=process)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
scan(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
n_jobs=2,
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 1),
)
@pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys()))
def test_scan_subsets_single_multi_core_both(self, score):
matching_data = MatchingData(target=self.target, template=self.template)
matching_data.target_mask = self.target
matching_data.template_mask = self.template
matching_data.rotations = self.rotations
setup, process = MATCHING_EXHAUSTIVE_REGISTER[score]
target_splits = {i: 1 for i in range(self.target.ndim)}
template_splits = {i: 1 for i in range(self.target.ndim)}
target_splits[0], template_splits[1] = 2, 2
scan_subsets(
matching_data=matching_data,
matching_setup=setup,
matching_score=process,
target_splits=target_splits,
template_splits=template_splits,
job_schedule=(2, 2),
)
def test_register_matching_exhaustive(self):
setup, matching = MATCHING_EXHAUSTIVE_REGISTER[
list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0]
] | memory_class = MATCHING_MEMORY_REGISTRY[ | 6 | 2023-10-20 13:46:01+00:00 | 12k |
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja | text/chinese.py | [
{
"identifier": "punctuation",
"path": "text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \... | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from text.symbols import punctuation
from text.tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,663 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub(
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub( | r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text | 0 | 2023-10-16 10:04:32+00:00 | 12k |
cfs-energy/cfspopcon | cfspopcon/formulas/radiated_power/mavrin_noncoronal.py | [
{
"identifier": "Impurity",
"path": "cfspopcon/named_options.py",
"snippet": "class Impurity(Enum):\n \"\"\"Enum of possible impurity elements.\n\n The enum value represents the element's atomic number (Z).\n \"\"\"\n\n Helium = 2\n Lithium = 3\n Beryllium = 4\n Carbon = 6\n Nitr... | import warnings
import numpy as np
from numpy import float64
from numpy.typing import NDArray
from ...named_options import Impurity
from ...unit_handling import Quantity, ureg, wraps_ufunc
from ..helpers import integrate_profile_over_volume | 8,147 | ]
)
elif impurity_Z == 7: # Nitrogen
temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0])
radc = np.array(
[
[-3.5312e01, -5.8692e01, -2.0301e01, -7.7571e01, -2.9401e01, -2.7201e01],
[7.1926e00, 6.8148e01, -8.8594e00, 5.0488e01, -3.8191e-01, -4.4640e00],
[7.8200e-03, 3.6209e-01, 6.0500e00, -6.5889e00, 3.5270e00, 7.6960e-01],
[-3.5696e00, -5.4257e01, -2.7129e00, -1.8187e01, -1.0347e00, 9.2450e-01],
[-1.2800e-02, 1.4835e-01, -7.6700e00, 6.8691e00, -2.4192e00, -6.7720e-01],
[1.1180e-02, -1.4700e-03, 1.0705e-01, 8.3119e-01, 3.2269e-01, 2.6185e-01],
[3.5812e-01, 1.3476e01, 1.9691e00, 2.0259e00, 2.2501e-01, -5.6280e-02],
[-2.5100e-03, -2.9646e-01, 2.3943e00, -1.7572e00, 3.9511e-01, 1.2014e-01],
[-2.2020e-02, 2.2706e-01, 1.4088e-01, -2.9376e-01, 2.6510e-02, 4.6870e-02],
[-1.0000e-03, 5.4220e-02, 4.7450e-02, 1.7200e-02, 7.8930e-02, 7.9250e-02],
]
)
elif impurity_Z == 8: # Oxygen
temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0])
radc = np.array(
[
[-3.6208e01, -2.9057e01, -2.9370e01, -4.4120e-02, -3.7073e01, -2.5037e01],
[7.5487e00, -1.5228e01, 8.7451e00, -5.4918e01, 7.8826e00, -5.7568e00],
[2.3340e-02, -3.1460e00, 6.3827e00, -9.5003e00, 3.7999e00, 1.2973e00],
[-2.1983e00, 2.0826e01, -1.2357e01, 2.8883e01, -3.8006e00, 1.2040e00],
[-1.0131e-01, 5.9427e00, -7.6451e00, 8.5536e00, -2.2619e00, -9.1955e-01],
[8.0600e-03, 1.0610e-01, -2.2230e-02, 5.5336e-01, 5.0270e-01, 2.8988e-01],
[-6.5108e-01, -8.0843e00, 3.4958e00, -4.8731e00, 5.2144e-01, -7.6780e-02],
[8.4570e-02, -2.6827e00, 2.2661e00, -1.9172e00, 3.0219e-01, 1.4568e-01],
[-2.1710e-02, 1.0350e-02, 2.5727e-01, -1.5709e-01, -6.6330e-02, 3.9250e-02],
[-2.1200e-03, 2.6480e-02, 7.7800e-02, 1.6370e-02, 6.1140e-02, 8.3010e-02],
]
)
elif impurity_Z == 10: # Neon
temperature_bin_borders = np.array([1.0, 10.0, 70.0, 300.0, 1000.0, 3000.0, 15000.0])
radc = np.array(
[
[-3.8610e01, -3.6822e01, -6.6901e00, -1.1261e02, -2.6330e02, -1.1174e02],
[1.2606e01, 4.9706e00, -2.4212e01, 8.5765e01, 2.1673e02, 6.1907e01],
[1.7866e-01, -1.5334e00, 7.3589e00, -2.1093e00, 1.2973e00, 4.7967e00],
[-1.0213e01, 1.1973e00, 5.7352e00, -3.0372e01, -6.7799e01, -1.6289e01],
[-7.7051e-01, 2.7279e00, -7.4602e00, 2.2928e00, -7.3310e-01, -2.5731e00],
[2.7510e-02, 9.0090e-02, -7.9030e-02, 7.7055e-01, 4.4883e-01, 4.2620e-01],
[4.3390e00, -1.3992e00, -8.5020e-02, 3.5346e00, 7.0398e00, 1.4263e00],
[6.4207e-01, -1.1084e00, 1.8679e00, -5.6062e-01, 9.3190e-02, 3.3443e-01],
[-3.3560e-02, 1.3620e-02, 2.2507e-01, -1.8569e-01, -1.5390e-02, -9.3734e-04],
[-1.3333e-04, 2.4300e-02, 7.1420e-02, 3.7550e-02, 7.7660e-02, 8.4220e-02],
]
)
elif impurity_Z == 18: # Argon
temperature_bin_borders = np.array([1.0, 10.0, 50.0, 150.0, 500.0, 1500.0, 10000.0])
radc = np.array(
[
[-3.6586e01, -4.8732e01, -2.3157e01, -6.8134e01, 5.5851e01, -6.2758e01],
[1.2841e01, 3.8185e01, -8.5132e00, 3.6408e01, -7.8618e01, 2.5163e01],
[2.3080e-02, -7.0622e-01, 1.5617e00, -7.3868e00, 1.0520e01, -7.4717e-01],
[-1.2087e01, -2.5859e01, 1.5478e00, -1.0735e01, 2.2871e01, -6.8170e00],
[-9.8000e-03, 1.2850e00, -1.8880e00, 6.8800e00, -7.7061e00, 6.9486e-01],
[-2.4600e-03, -6.8710e-02, 2.2830e-01, 3.1142e-01, -1.8530e-01, 4.6946e-01],
[4.8823e00, 5.4372e00, 2.8279e-01, 8.0440e-01, -2.1616e00, 5.9969e-01],
[-3.7470e-02, -5.2157e-01, 5.5767e-01, -1.5740e00, 1.4123e00, -1.3487e-01],
[1.1100e-03, 1.4016e-01, -9.9600e-02, -9.9180e-02, 1.8409e-01, -8.1380e-02],
[1.1100e-03, 1.9120e-02, -1.5280e-02, 9.4500e-03, 6.7470e-02, 2.5840e-02],
]
)
else:
raise RuntimeError("This should never happen, please ensure all impurity cases in zimp array are covered!")
# If trying to evaluate for a temperature outside of the given range, assume nearest neighbor
# and throw a warning
if any(electron_temp_profile < temperature_bin_borders[0]) or any(
electron_temp_profile > temperature_bin_borders[-1]
): # pragma: no cover
warnings.warn(
f"Mavrin 2017 line radiation calculation is only valid between {temperature_bin_borders[0]}eV-{temperature_bin_borders[-1]}eV. Using nearest neighbor extrapolation.",
stacklevel=3,
)
electron_temp_profile = np.maximum(electron_temp_profile, temperature_bin_borders[0])
electron_temp_profile = np.minimum(electron_temp_profile, temperature_bin_borders[-1])
# solve for radiated power
ne_tau_i_per_m3 = electron_density_profile * tau_i
X_vals = np.log10(electron_temp_profile)
Y_vals = np.log10(ne_tau_i_per_m3 / 1e19)
if np.any(Y_vals > 0.0): # pragma: no cover
warnings.warn("Warning: treating points with ne_tau_i_per_m3 > 1e19 m^-3 s as coronal.", stacklevel=3)
Y_vals = np.minimum(Y_vals, 0.0)
log10_Lz = np.zeros(electron_temp_profile.size)
for i, Te_test in enumerate(electron_temp_profile):
X, Y = X_vals[i], Y_vals[i]
for j in range(temperature_bin_borders.size - 1):
Te_min, Te_max = temperature_bin_borders[j], temperature_bin_borders[j + 1]
if Te_min <= Te_test <= Te_max:
log10_Lz[i] = (
radc[0, j]
+ radc[1, j] * X
+ radc[2, j] * Y
+ radc[3, j] * X**2
+ radc[4, j] * X * Y
+ radc[5, j] * Y**2
+ radc[6, j] * X**3
+ radc[7, j] * X**2 * Y
+ radc[8, j] * X * Y**2
+ radc[9, j] * Y**3
)
continue
radrate = 10.0**log10_Lz
qRad = radrate * electron_density_profile * electron_density_profile * impurity_concentration # W / (m^3 s)
| """Calculate the radiated power due to impurities, according to an analytical fitted curve from Mavrin 2017."""
@wraps_ufunc(
return_units=dict(radiated_power=ureg.MW),
input_units=dict(
rho=ureg.dimensionless,
electron_temp_profile=ureg.keV,
electron_density_profile=ureg.n19,
tau_i=ureg.s,
impurity_concentration=ureg.dimensionless,
impurity_species=None,
plasma_volume=ureg.m**3,
),
input_core_dims=[("dim_rho",), ("dim_rho",), ("dim_rho",), (), (), (), ()],
)
def calc_impurity_radiated_power_mavrin_noncoronal( # noqa: PLR0912
rho: NDArray[float64],
electron_temp_profile: NDArray[float64],
electron_density_profile: NDArray[float64],
tau_i: Quantity,
impurity_concentration: float,
impurity_species: Impurity,
plasma_volume: float,
) -> float:
"""Calculation of radiated power, using fits from A.A. Mavrin's 2017 paper.
"Radiative Cooling Rates for Low-Z Impurities in Non-coronal Equilibrium State."
:cite:`mavrin_radiative_2017`
Args:
rho: [~] :term:`glossary link<rho>`
electron_temp_profile: [keV] :term:`glossary link<electron_temp_profile>`
electron_density_profile: [1e19 m^-3] :term:`glossary link<electron_density_profile>`
tau_i: [s] :term:`glossary link<tau_i>`
impurity_concentration: [~] :term:`glossary link<impurity_concentration>`
impurity_species: [] :term:`glossary link<impurity_species>`
plasma_volume: [m^3] :term:`glossary link<plasma_volume>`
Returns:
[MW] Estimated radiation power due to this impurity
"""
impurity_Z = impurity_species.value
# He, Li, Be, C, N, O, Ne, Ar
zimp = np.array([2, 3, 4, 6, 7, 8, 10, 18])
if impurity_Z not in zimp: # pragma: no cover
warnings.warn(f"Mavrin 2017 line radiation calculation not supported for impurity with Z={impurity_Z}", stacklevel=3)
return np.nan
# L_z coefficients for the 11 supported impurities
if impurity_Z == 2: # Helium
temperature_bin_borders = np.array([1.0, 3.0, 10.0, 30.0, 100.0, 15000.0])
radc = np.array(
[
[-3.9341e01, -2.7185e01, -3.4950e01, -3.1299e01, -3.3203e01],
[2.2742e01, -3.4465e01, 5.5957e00, -4.4749e00, -2.3306e00],
[-8.5940e-02, 3.2223e-01, 2.1542e00, 2.9614e-01, -5.3911e-01],
[-2.5420e01, 5.0933e01, -7.4762e00, 1.5259e00, 7.2592e-01],
[1.8843e00, 1.0589e-01, -3.7391e00, -6.1433e-01, 9.7550e-02],
[-3.5681e-01, 1.1632e-01, 1.4444e-01, 3.2651e-01, 2.6917e-01],
[-3.2771e00, -2.3641e01, 2.4534e00, -1.6652e-01, -6.6110e-02],
[-4.9766e00, -7.4782e-01, 1.5000e00, 1.5704e-01, 8.9900e-03],
[1.9730e-02, -7.6200e-03, 2.1307e-01, -8.0601e-04, 2.9240e-02],
[-7.4260e-02, 2.1030e-02, 7.6590e-02, 5.0330e-02, 5.1180e-02],
]
)
elif impurity_Z == 3: # Lithium
temperature_bin_borders = np.array([1.0, 7.0, 30.0, 60.0, 100.0, 1000.0, 10000.0])
radc = np.array(
[
[-3.5752e01, -3.1170e01, -3.6558e01, -3.0560e01, -3.0040e01, -3.4199e01],
[-1.6780e00, -1.6918e01, 9.4272e00, -2.4680e00, -4.2963e00, -8.5686e-01],
[9.5500e-03, 1.1481e-01, 3.5299e00, 1.7912e00, 2.7407e-01, -6.3246e-01],
[-6.1560e00, 2.0492e01, -8.1056e00, -2.8659e-01, 1.1569e00, 2.4968e-01],
[-1.5027e00, 2.6136e-01, -4.4113e00, -1.9929e00, -4.5453e-01, 9.9930e-02],
[2.5568e-01, 2.4870e-01, 5.1430e-02, 2.8150e-01, 3.0616e-01, 2.5080e-01],
[1.1009e01, -7.0035e00, 1.9427e00, 2.3898e-01, -9.1510e-02, -1.7230e-02],
[2.1169e00, -3.3910e-01, 1.3459e00, 5.0412e-01, 9.7550e-02, 1.4410e-02],
[-9.6420e-02, -3.5570e-02, 2.3865e-01, 5.8550e-02, 1.6540e-02, 3.7030e-02],
[1.3460e-02, 4.1910e-02, 8.6850e-02, 6.7410e-02, 5.4690e-02, 5.5670e-02],
]
)
elif impurity_Z == 4: # Beryllium
temperature_bin_borders = np.array([0.2, 0.7, 3.0, 11.0, 45.0, 170.0, 10000.0])
radc = np.array(
[
[-3.0242e01, -3.2152e01, -3.0169e01, -3.7201e01, -4.0868e01, -2.8539e01],
[2.1405e01, 3.1572e00, -8.9830e00, -2.5643e00, 1.4625e01, -5.0020e00],
[1.0117e-01, 1.4168e-01, 6.3656e-01, -4.0467e00, 3.3373e00, 3.1089e-01],
[2.7450e01, -1.4617e01, 4.5232e00, 7.1732e00, -8.8128e00, 1.3149e00],
[8.8367e-01, 1.4646e-01, -1.5126e00, 5.8147e00, -3.1064e00, -4.0022e-01],
[-6.6110e-02, 1.4683e-01, 4.0756e-01, 4.0114e-01, 2.4343e-01, 3.1788e-01],
[3.0202e01, 4.3653e00, -3.7497e-01, -2.5926e00, 1.5996e00, -1.0780e-01],
[1.2175e00, -1.1290e00, 7.2552e-01, -2.0708e00, 6.8069e-01, 7.3280e-02],
[-1.4883e-01, 3.4914e-01, -2.9810e-02, -1.4775e-01, 6.0120e-02, 1.7320e-02],
[4.8900e-03, 4.1730e-02, 5.5620e-02, 2.1900e-02, 6.8350e-02, 6.1360e-02],
]
)
elif impurity_Z == 6: # Carbon
temperature_bin_borders = np.array([1.0, 7.0, 20.0, 70.0, 200.0, 700.0, 15000.0])
radc = np.array(
[
[-3.4509e01, -4.9228e01, -1.9100e01, -6.7743e01, -2.4016e01, -2.8126e01],
[6.7599e00, 5.3922e01, -1.5476e01, 4.1606e01, -7.3974e00, -4.1679e00],
[-1.7140e-02, 8.4584e-01, 4.2962e00, -5.3665e00, 2.9707e00, 4.9937e-01],
[-4.0337e00, -5.1128e01, 2.1893e00, -1.5734e01, 1.6859e00, 9.0578e-01],
[1.5517e-01, -8.9366e-01, -6.1658e00, 6.1760e00, -2.1965e00, -5.3687e-01],
[2.1110e-02, -2.2710e-02, 1.6098e-01, 7.8010e-01, 3.0521e-01, 2.5962e-01],
[6.5977e-01, 1.4758e01, 1.1021e00, 1.7905e00, -1.1147e-01, -5.8310e-02],
[-1.7392e-01, 1.6371e-01, 2.1568e00, -1.7320e00, 3.8653e-01, 1.0420e-01],
[-2.9270e-02, 2.9362e-01, 1.1101e-01, -2.7897e-01, 3.8970e-02, 4.6610e-02],
[1.7600e-03, 5.5880e-02, 4.2700e-02, 2.3450e-02, 7.8690e-02, 7.3950e-02],
]
)
elif impurity_Z == 7: # Nitrogen
temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0])
radc = np.array(
[
[-3.5312e01, -5.8692e01, -2.0301e01, -7.7571e01, -2.9401e01, -2.7201e01],
[7.1926e00, 6.8148e01, -8.8594e00, 5.0488e01, -3.8191e-01, -4.4640e00],
[7.8200e-03, 3.6209e-01, 6.0500e00, -6.5889e00, 3.5270e00, 7.6960e-01],
[-3.5696e00, -5.4257e01, -2.7129e00, -1.8187e01, -1.0347e00, 9.2450e-01],
[-1.2800e-02, 1.4835e-01, -7.6700e00, 6.8691e00, -2.4192e00, -6.7720e-01],
[1.1180e-02, -1.4700e-03, 1.0705e-01, 8.3119e-01, 3.2269e-01, 2.6185e-01],
[3.5812e-01, 1.3476e01, 1.9691e00, 2.0259e00, 2.2501e-01, -5.6280e-02],
[-2.5100e-03, -2.9646e-01, 2.3943e00, -1.7572e00, 3.9511e-01, 1.2014e-01],
[-2.2020e-02, 2.2706e-01, 1.4088e-01, -2.9376e-01, 2.6510e-02, 4.6870e-02],
[-1.0000e-03, 5.4220e-02, 4.7450e-02, 1.7200e-02, 7.8930e-02, 7.9250e-02],
]
)
elif impurity_Z == 8: # Oxygen
temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0])
radc = np.array(
[
[-3.6208e01, -2.9057e01, -2.9370e01, -4.4120e-02, -3.7073e01, -2.5037e01],
[7.5487e00, -1.5228e01, 8.7451e00, -5.4918e01, 7.8826e00, -5.7568e00],
[2.3340e-02, -3.1460e00, 6.3827e00, -9.5003e00, 3.7999e00, 1.2973e00],
[-2.1983e00, 2.0826e01, -1.2357e01, 2.8883e01, -3.8006e00, 1.2040e00],
[-1.0131e-01, 5.9427e00, -7.6451e00, 8.5536e00, -2.2619e00, -9.1955e-01],
[8.0600e-03, 1.0610e-01, -2.2230e-02, 5.5336e-01, 5.0270e-01, 2.8988e-01],
[-6.5108e-01, -8.0843e00, 3.4958e00, -4.8731e00, 5.2144e-01, -7.6780e-02],
[8.4570e-02, -2.6827e00, 2.2661e00, -1.9172e00, 3.0219e-01, 1.4568e-01],
[-2.1710e-02, 1.0350e-02, 2.5727e-01, -1.5709e-01, -6.6330e-02, 3.9250e-02],
[-2.1200e-03, 2.6480e-02, 7.7800e-02, 1.6370e-02, 6.1140e-02, 8.3010e-02],
]
)
elif impurity_Z == 10: # Neon
temperature_bin_borders = np.array([1.0, 10.0, 70.0, 300.0, 1000.0, 3000.0, 15000.0])
radc = np.array(
[
[-3.8610e01, -3.6822e01, -6.6901e00, -1.1261e02, -2.6330e02, -1.1174e02],
[1.2606e01, 4.9706e00, -2.4212e01, 8.5765e01, 2.1673e02, 6.1907e01],
[1.7866e-01, -1.5334e00, 7.3589e00, -2.1093e00, 1.2973e00, 4.7967e00],
[-1.0213e01, 1.1973e00, 5.7352e00, -3.0372e01, -6.7799e01, -1.6289e01],
[-7.7051e-01, 2.7279e00, -7.4602e00, 2.2928e00, -7.3310e-01, -2.5731e00],
[2.7510e-02, 9.0090e-02, -7.9030e-02, 7.7055e-01, 4.4883e-01, 4.2620e-01],
[4.3390e00, -1.3992e00, -8.5020e-02, 3.5346e00, 7.0398e00, 1.4263e00],
[6.4207e-01, -1.1084e00, 1.8679e00, -5.6062e-01, 9.3190e-02, 3.3443e-01],
[-3.3560e-02, 1.3620e-02, 2.2507e-01, -1.8569e-01, -1.5390e-02, -9.3734e-04],
[-1.3333e-04, 2.4300e-02, 7.1420e-02, 3.7550e-02, 7.7660e-02, 8.4220e-02],
]
)
elif impurity_Z == 18: # Argon
temperature_bin_borders = np.array([1.0, 10.0, 50.0, 150.0, 500.0, 1500.0, 10000.0])
radc = np.array(
[
[-3.6586e01, -4.8732e01, -2.3157e01, -6.8134e01, 5.5851e01, -6.2758e01],
[1.2841e01, 3.8185e01, -8.5132e00, 3.6408e01, -7.8618e01, 2.5163e01],
[2.3080e-02, -7.0622e-01, 1.5617e00, -7.3868e00, 1.0520e01, -7.4717e-01],
[-1.2087e01, -2.5859e01, 1.5478e00, -1.0735e01, 2.2871e01, -6.8170e00],
[-9.8000e-03, 1.2850e00, -1.8880e00, 6.8800e00, -7.7061e00, 6.9486e-01],
[-2.4600e-03, -6.8710e-02, 2.2830e-01, 3.1142e-01, -1.8530e-01, 4.6946e-01],
[4.8823e00, 5.4372e00, 2.8279e-01, 8.0440e-01, -2.1616e00, 5.9969e-01],
[-3.7470e-02, -5.2157e-01, 5.5767e-01, -1.5740e00, 1.4123e00, -1.3487e-01],
[1.1100e-03, 1.4016e-01, -9.9600e-02, -9.9180e-02, 1.8409e-01, -8.1380e-02],
[1.1100e-03, 1.9120e-02, -1.5280e-02, 9.4500e-03, 6.7470e-02, 2.5840e-02],
]
)
else:
raise RuntimeError("This should never happen, please ensure all impurity cases in zimp array are covered!")
# If trying to evaluate for a temperature outside of the given range, assume nearest neighbor
# and throw a warning
if any(electron_temp_profile < temperature_bin_borders[0]) or any(
electron_temp_profile > temperature_bin_borders[-1]
): # pragma: no cover
warnings.warn(
f"Mavrin 2017 line radiation calculation is only valid between {temperature_bin_borders[0]}eV-{temperature_bin_borders[-1]}eV. Using nearest neighbor extrapolation.",
stacklevel=3,
)
electron_temp_profile = np.maximum(electron_temp_profile, temperature_bin_borders[0])
electron_temp_profile = np.minimum(electron_temp_profile, temperature_bin_borders[-1])
# solve for radiated power
ne_tau_i_per_m3 = electron_density_profile * tau_i
X_vals = np.log10(electron_temp_profile)
Y_vals = np.log10(ne_tau_i_per_m3 / 1e19)
if np.any(Y_vals > 0.0): # pragma: no cover
warnings.warn("Warning: treating points with ne_tau_i_per_m3 > 1e19 m^-3 s as coronal.", stacklevel=3)
Y_vals = np.minimum(Y_vals, 0.0)
log10_Lz = np.zeros(electron_temp_profile.size)
for i, Te_test in enumerate(electron_temp_profile):
X, Y = X_vals[i], Y_vals[i]
for j in range(temperature_bin_borders.size - 1):
Te_min, Te_max = temperature_bin_borders[j], temperature_bin_borders[j + 1]
if Te_min <= Te_test <= Te_max:
log10_Lz[i] = (
radc[0, j]
+ radc[1, j] * X
+ radc[2, j] * Y
+ radc[3, j] * X**2
+ radc[4, j] * X * Y
+ radc[5, j] * Y**2
+ radc[6, j] * X**3
+ radc[7, j] * X**2 * Y
+ radc[8, j] * X * Y**2
+ radc[9, j] * Y**3
)
continue
radrate = 10.0**log10_Lz
qRad = radrate * electron_density_profile * electron_density_profile * impurity_concentration # W / (m^3 s) | radiated_power = integrate_profile_over_volume(qRad, rho, plasma_volume) # [W] | 3 | 2023-10-19 16:58:23+00:00 | 12k |
GXimingLu/IPA | main.py | [
{
"identifier": "get_args",
"path": "arguments.py",
"snippet": "def get_args():\n parser = argparse.ArgumentParser(description='RL')\n\n # dataset\n parser.add_argument(\n '--output-dir', type=str, default=f'{HOME_PATH}/commonGen')\n parser.add_argument(\n '--dataset-train', ty... | import os
import torch
import json
import time
import logging
import random
import argparse
import numpy as np
import torch.nn.functional as F
from typing import List
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from transformers import get_linear_schedule_with_warmup
from arguments import get_args
from policy import Policy
from data_pool import DataPool
from reward import Reward
from utils.utils import ensure_dir, ceil_div, reduce_mean, reduce_sum
from utils.generation_utils import decode | 7,558 | self.kl_ctl = FixedController(self.params.kl_coef)
self.kl_loss = torch.nn.KLDivLoss(reduction="none")
if self.params.adaptive_entropy:
self.entropy_ctl = AdaptiveController(self.params.entropy_coef, self.params.target_entropy,
self.params.horizon)
else:
self.entropy_ctl = FixedController(self.params.entropy_coef)
self.tree_tokens = tree_tokens
self.best_cat = self.tree_tokens[0]
self.best_cat_id = self.policy.tokenizer.convert_tokens_to_ids(self.best_cat)
self.sample_dataloader, self.sampler = None, None
self.seq_collator = SequenceCollator(tokenizer=policy.tokenizer)
if resume:
sample_dataset = SequenceDataset(data_pool=self.data_pool)
self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
shuffle=True, drop_last=True, collate_fn=self.seq_collator)
self.sampler = iter(self.sample_dataloader)
def sample(self, step):
if step % self.params.sample_interval != 0:
return
log.info(f"[step {step}] Sampling ...")
concepts, prompts, responses = [], [], []
for i, batch in enumerate(tqdm(self.train_dataloader, total=len(self.train_dataloader),
desc='Sampling from current policy')):
input_ids, attention_mask, concept, constraints = batch
use_constraint = random.choices([1, 0], weights=[self.params.hard_prob, 1 - self.params.hard_prob], k=1)[0]
rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
constraints=constraints if use_constraint else None,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
prompt, response = rollouts['query/text'], rollouts['response/text']
concepts.extend(concept)
prompts.extend(prompt)
responses.extend(response)
scores = self.score_model.get_reward(prompts, responses, concepts, f'step{step}')
self.data_pool.add(prompts=prompts, responses=responses, scores=scores['reward'])
sample_dataset = SequenceDataset(data_pool=self.data_pool)
self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
shuffle=True, drop_last=True, collate_fn=self.seq_collator)
self.sampler = iter(self.sample_dataloader)
def step(self, step_num):
step_started_at = time.time()
self.save(step=step_num)
self.eval(step=step_num)
self.sample(step=step_num)
try:
batch = next(self.sampler)
assert len(batch[0]) == self.params.batch_size, 'insufficient batch'
except (StopIteration, AssertionError):
self.sampler = iter(self.sample_dataloader)
batch = next(self.sampler)
self.policy.value_model.train()
ppo_loss, stats = self.loss(step_num, *batch)
ppo_loss = ppo_loss / self.params.grad_accum
ppo_loss.backward()
if self.params.clip_grad:
torch.nn.utils.clip_grad_norm_(self.policy.value_model.parameters(), self.params.max_grad_norm)
if (step_num + 1) % self.params.grad_accum == 0:
self.optimizer.step()
self.optimizer.zero_grad()
self.scheduler.step()
for metric in ['kl', 'entropy']:
self.writer.add_scalar(f'Objective/{metric}', stats[f'objective/{metric}'], step_num)
for metric in ['lm', 'kl', 'entropy', 'total']:
self.writer.add_scalar(f'Loss/{metric}', stats[f'loss/{metric}'], step_num)
self.writer.add_scalar(f'Params/lr', self.optimizer.param_groups[0]['lr'], step_num)
self.writer.add_scalar(f'Params/kl_coef', self.kl_ctl.value, step_num)
self.writer.add_scalar(f'Params/entropy_coef', self.entropy_ctl.value, step_num)
self.kl_ctl.update(stats['objective/kl'], self.params.batch_size, True)
self.entropy_ctl.update(stats['objective/entropy'], self.params.batch_size, False)
step_time = time.time() - step_started_at
eps_per_second = float(self.params.batch_size) / step_time
log.info(f"[step {step_num}] step_time={step_time:.2f}s, eps/s={eps_per_second:.2f}")
def loss(self, step, query_input_ids, query_mask, response_input_ids, response_mask):
outputs = self.policy.forward_pass(query_input_ids, query_mask, response_input_ids, response_mask,
use_control_code=True)
lm_loss, logprobs, entropy, logits = outputs['response/lm_loss'], outputs['response/log_prob'], \
outputs['response/entropy'], outputs['response/logits']
masks = response_mask.to(self.policy.device)
with torch.no_grad():
ref_outputs = self.policy.forward_pass(query_input_ids[:, 1:], query_mask[:, 1:],
response_input_ids, response_mask, use_control_code=False)
ref_logprobs, ref_logits = ref_outputs['response/log_prob'], ref_outputs['response/logits']
kl = torch.sum(self.kl_loss(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1)), dim=-1)
loss = reduce_mean(lm_loss + self.kl_ctl.value * kl - self.entropy_ctl.value * entropy, masks)
data = {'logprobs': logprobs, 'ref_logprobs': ref_logprobs, 'masks': masks,
'logits': logits, 'ref_logits': ref_logits,
'lm_loss': reduce_mean(lm_loss, masks), 'kl_loss': reduce_mean(kl, masks),
'entropy': reduce_mean(entropy, masks), 'total_loss': loss}
stats = self.record_step_stats(data)
queries, responses = decode(self.policy.tokenizer, query_input_ids, response_input_ids)
self.print_samples(queries=queries, responses=responses, lm_loss=reduce_mean(lm_loss, masks, axis=1),
logprobs=logprobs, ref_logprobs=ref_logprobs, masks=masks, step=step)
return loss, stats
def record_step_stats(self, data):
masks = data['masks']
kl = torch.sum(self.kl_loss(F.log_softmax(data['ref_logits'], dim=-1), F.softmax(data['logits'], dim=-1)), dim=-1)
|
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path, tokenizer):
data = json.load(open(path, 'r'))
self.items = [v for k, v in data.items() if v['human_order']]
self.tokenizer = tokenizer
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
item = self.items[idx]
order_words = random.choice(item['human_order'])
constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w]))
for w in order_words.split('-')])
prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
prompt = prompt % ' '.join(order_words.split('-'))
return {
'order': order_words,
'constraint': constraint,
'prompt': prompt,
}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
concepts = [sequence['order'] for sequence in sequences]
prompts = [sequence['prompt'] for sequence in sequences]
constraints = [sequence['constraint'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask, concepts, constraints
class SequenceDataset(Dataset):
def __init__(self, data_pool: DataPool):
self.queries, self.responses, self.cat_tokens = data_pool.get_data()
def __len__(self):
return len(self.queries)
def __getitem__(self, idx):
return {'query': self.queries[idx],
'response': self.responses[idx],
'cat_tokens': self.cat_tokens[idx]
}
class SequenceCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
queries = [sequence['query'] for sequence in sequences]
responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]
query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
query_input_ids = query_encodings_dict['input_ids']
query_mask = query_encodings_dict['attention_mask']
query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)
response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
response_input_ids = response_encodings_dict['input_ids']
response_mask = response_encodings_dict['attention_mask']
return query_input_ids, query_mask, response_input_ids, response_mask
class FixedController:
def __init__(self, coef):
self.value = coef
def update(self, current, n_steps, lower_bound):
pass
class AdaptiveController:
def __init__(self, init_coef, target, horizon):
self.value = init_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps, lower_bound):
proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
if lower_bound:
mult = 1 + proportional_error * n_steps / self.horizon
else:
mult = 1 - proportional_error * n_steps / self.horizon
self.value *= mult
class ConditionTrainer:
def __init__(self,
params: argparse.Namespace,
policy: Policy,
data_pool: DataPool,
score_model: Reward,
tree_tokens: List[str],
train_dataloader: DataLoader,
val_dataloader: DataLoader,
optimizer: Optimizer,
scheduler: LambdaLR,
resume: bool):
self.params = params
self.policy = policy
self.data_pool = data_pool
self.score_model = score_model
self.optimizer = optimizer
self.scheduler = scheduler
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.writer = SummaryWriter(log_dir=params.tensorboard_dir)
if self.params.adaptive_kl:
self.kl_ctl = AdaptiveController(self.params.kl_coef, self.params.target_kl, self.params.horizon)
else:
self.kl_ctl = FixedController(self.params.kl_coef)
self.kl_loss = torch.nn.KLDivLoss(reduction="none")
if self.params.adaptive_entropy:
self.entropy_ctl = AdaptiveController(self.params.entropy_coef, self.params.target_entropy,
self.params.horizon)
else:
self.entropy_ctl = FixedController(self.params.entropy_coef)
self.tree_tokens = tree_tokens
self.best_cat = self.tree_tokens[0]
self.best_cat_id = self.policy.tokenizer.convert_tokens_to_ids(self.best_cat)
self.sample_dataloader, self.sampler = None, None
self.seq_collator = SequenceCollator(tokenizer=policy.tokenizer)
if resume:
sample_dataset = SequenceDataset(data_pool=self.data_pool)
self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
shuffle=True, drop_last=True, collate_fn=self.seq_collator)
self.sampler = iter(self.sample_dataloader)
def sample(self, step):
if step % self.params.sample_interval != 0:
return
log.info(f"[step {step}] Sampling ...")
concepts, prompts, responses = [], [], []
for i, batch in enumerate(tqdm(self.train_dataloader, total=len(self.train_dataloader),
desc='Sampling from current policy')):
input_ids, attention_mask, concept, constraints = batch
use_constraint = random.choices([1, 0], weights=[self.params.hard_prob, 1 - self.params.hard_prob], k=1)[0]
rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
constraints=constraints if use_constraint else None,
max_len=self.params.response_length, top_p=self.params.top_p,
use_control_code=(step > 0))
prompt, response = rollouts['query/text'], rollouts['response/text']
concepts.extend(concept)
prompts.extend(prompt)
responses.extend(response)
scores = self.score_model.get_reward(prompts, responses, concepts, f'step{step}')
self.data_pool.add(prompts=prompts, responses=responses, scores=scores['reward'])
sample_dataset = SequenceDataset(data_pool=self.data_pool)
self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
shuffle=True, drop_last=True, collate_fn=self.seq_collator)
self.sampler = iter(self.sample_dataloader)
def step(self, step_num):
step_started_at = time.time()
self.save(step=step_num)
self.eval(step=step_num)
self.sample(step=step_num)
try:
batch = next(self.sampler)
assert len(batch[0]) == self.params.batch_size, 'insufficient batch'
except (StopIteration, AssertionError):
self.sampler = iter(self.sample_dataloader)
batch = next(self.sampler)
self.policy.value_model.train()
ppo_loss, stats = self.loss(step_num, *batch)
ppo_loss = ppo_loss / self.params.grad_accum
ppo_loss.backward()
if self.params.clip_grad:
torch.nn.utils.clip_grad_norm_(self.policy.value_model.parameters(), self.params.max_grad_norm)
if (step_num + 1) % self.params.grad_accum == 0:
self.optimizer.step()
self.optimizer.zero_grad()
self.scheduler.step()
for metric in ['kl', 'entropy']:
self.writer.add_scalar(f'Objective/{metric}', stats[f'objective/{metric}'], step_num)
for metric in ['lm', 'kl', 'entropy', 'total']:
self.writer.add_scalar(f'Loss/{metric}', stats[f'loss/{metric}'], step_num)
self.writer.add_scalar(f'Params/lr', self.optimizer.param_groups[0]['lr'], step_num)
self.writer.add_scalar(f'Params/kl_coef', self.kl_ctl.value, step_num)
self.writer.add_scalar(f'Params/entropy_coef', self.entropy_ctl.value, step_num)
self.kl_ctl.update(stats['objective/kl'], self.params.batch_size, True)
self.entropy_ctl.update(stats['objective/entropy'], self.params.batch_size, False)
step_time = time.time() - step_started_at
eps_per_second = float(self.params.batch_size) / step_time
log.info(f"[step {step_num}] step_time={step_time:.2f}s, eps/s={eps_per_second:.2f}")
def loss(self, step, query_input_ids, query_mask, response_input_ids, response_mask):
outputs = self.policy.forward_pass(query_input_ids, query_mask, response_input_ids, response_mask,
use_control_code=True)
lm_loss, logprobs, entropy, logits = outputs['response/lm_loss'], outputs['response/log_prob'], \
outputs['response/entropy'], outputs['response/logits']
masks = response_mask.to(self.policy.device)
with torch.no_grad():
ref_outputs = self.policy.forward_pass(query_input_ids[:, 1:], query_mask[:, 1:],
response_input_ids, response_mask, use_control_code=False)
ref_logprobs, ref_logits = ref_outputs['response/log_prob'], ref_outputs['response/logits']
kl = torch.sum(self.kl_loss(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1)), dim=-1)
loss = reduce_mean(lm_loss + self.kl_ctl.value * kl - self.entropy_ctl.value * entropy, masks)
data = {'logprobs': logprobs, 'ref_logprobs': ref_logprobs, 'masks': masks,
'logits': logits, 'ref_logits': ref_logits,
'lm_loss': reduce_mean(lm_loss, masks), 'kl_loss': reduce_mean(kl, masks),
'entropy': reduce_mean(entropy, masks), 'total_loss': loss}
stats = self.record_step_stats(data)
queries, responses = decode(self.policy.tokenizer, query_input_ids, response_input_ids)
self.print_samples(queries=queries, responses=responses, lm_loss=reduce_mean(lm_loss, masks, axis=1),
logprobs=logprobs, ref_logprobs=ref_logprobs, masks=masks, step=step)
return loss, stats
def record_step_stats(self, data):
masks = data['masks']
kl = torch.sum(self.kl_loss(F.log_softmax(data['ref_logits'], dim=-1), F.softmax(data['logits'], dim=-1)), dim=-1) | mean_kl = torch.mean(reduce_sum(kl, masks, axis=1)) | 7 | 2023-10-20 08:30:18+00:00 | 12k |
violet-sto/HN-GFN | proxy/proxy.py | [
{
"identifier": "Regressor",
"path": "proxy/regression.py",
"snippet": "class Regressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, dropout_rate=0, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.... | import numpy as np
import pandas as pd
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from proxy.regression import Regressor, DropoutRegressor, EvidentialRegressor, EnsembleRegressor, GPRegressor
from mol_mdp_ext import MolMDPExtended
from botorch.utils.multi_objective.box_decompositions.non_dominated import FastNondominatedPartitioning
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement
from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
from botorch.acquisition.analytic import UpperConfidenceBound, ExpectedImprovement
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.transforms import normalize, unnormalize
from botorch.acquisition.objective import GenericMCObjective
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.sampling.samplers import SobolQMCNormalSampler
from sklearn.model_selection import train_test_split
from utils.acq_func import qUpperConfidenceBound, qExpectedImprovement
from copy import copy, deepcopy | 9,858 | # from botorch.acquisition.monte_carlo import qUpperConfidenceBound, qExpectedImprovement
def make_proxy_model(args, mdp):
repr_type = args.proxy_repr_type
nemb = args.proxy_nemb
num_conv_steps = args.proxy_num_conv_steps
model_version = args.proxy_model_version
if args.proxy_uncertainty == "none":
model = Regressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout)
if args.proxy_uncertainty == "dropout":
model = DropoutRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout,
num_dropout_samples=args.proxy_num_dropout_samples)
elif args.proxy_uncertainty == 'ensemble':
model = EnsembleRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout,
num_dropout_samples=args.proxy_num_dropout_samples)
elif args.proxy_uncertainty == 'evidential':
model = EvidentialRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout)
elif args.proxy_uncertainty == 'GP':
model = GPRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout)
model.to(args.device)
if args.floatX == 'float64':
model = model.double()
return model
def get_proxy(args, bpath, oracle):
if args.acq_fn.lower() == 'none':
return NoAF(args, bpath, oracle)
elif args.acq_fn.lower() == 'ucb':
return UCB(args, bpath, oracle)
elif args.acq_fn.lower() == 'ucb_chebyshev':
return UCB_chebyshev(args, bpath, oracle)
elif args.acq_fn.lower() == 'ei':
return EI(args, bpath, oracle)
class Proxy:
def __init__(self, args, bpath, oracle):
self.args = args
self.ref_point = torch.zeros(len(args.objectives)).to(args.device)
self.oracle = oracle
self.device = args.device
| # from botorch.acquisition.monte_carlo import qUpperConfidenceBound, qExpectedImprovement
def make_proxy_model(args, mdp):
repr_type = args.proxy_repr_type
nemb = args.proxy_nemb
num_conv_steps = args.proxy_num_conv_steps
model_version = args.proxy_model_version
if args.proxy_uncertainty == "none":
model = Regressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout)
if args.proxy_uncertainty == "dropout":
model = DropoutRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout,
num_dropout_samples=args.proxy_num_dropout_samples)
elif args.proxy_uncertainty == 'ensemble':
model = EnsembleRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout,
num_dropout_samples=args.proxy_num_dropout_samples)
elif args.proxy_uncertainty == 'evidential':
model = EvidentialRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout)
elif args.proxy_uncertainty == 'GP':
model = GPRegressor(args,
nhid=nemb,
nvec=0,
num_out_per_stem=mdp.num_blocks,
num_out_per_mol=len(args.objectives),
num_conv_steps=num_conv_steps,
version=model_version,
dropout_rate=args.proxy_dropout)
model.to(args.device)
if args.floatX == 'float64':
model = model.double()
return model
def get_proxy(args, bpath, oracle):
if args.acq_fn.lower() == 'none':
return NoAF(args, bpath, oracle)
elif args.acq_fn.lower() == 'ucb':
return UCB(args, bpath, oracle)
elif args.acq_fn.lower() == 'ucb_chebyshev':
return UCB_chebyshev(args, bpath, oracle)
elif args.acq_fn.lower() == 'ei':
return EI(args, bpath, oracle)
class Proxy:
def __init__(self, args, bpath, oracle):
self.args = args
self.ref_point = torch.zeros(len(args.objectives)).to(args.device)
self.oracle = oracle
self.device = args.device | self.mdp = MolMDPExtended(bpath) | 5 | 2023-10-24 14:10:35+00:00 | 12k |
SALT-NLP/Efficient_Unlearning | src/models/transformers/parameter-efficient-finetuning/layer.py | [
{
"identifier": "AdapterCompositionBlock",
"path": "src/models/transformers/parameter-efficient-finetuning/composition.py",
"snippet": "class AdapterCompositionBlock(Sequence):\n def __init__(self, *children):\n self.children = [parse_composition(b, None) for b in children]\n\n def __getite... | from abc import ABC, abstractmethod
from typing import List, Mapping, Union
from torch import nn
from .composition import AdapterCompositionBlock, BatchSplit, Fuse, Parallel, Split, Stack
from .configuration import AdapterConfig
from .context import AdapterSetup, ForwardContext
from .modeling import Adapter, BertFusion, ParallelAdapter
import numpy as np
import torch | 8,171 |
class AdapterLayerBase(ABC):
"""
Base class for all adaptation methods that require per-layer modules.
"""
@property
def layer_idx(self):
return getattr(self, "_layer_idx", -1)
@layer_idx.setter
def layer_idx(self, layer_idx):
idx = getattr(self, "_layer_idx", layer_idx)
assert idx == layer_idx
setattr(self, "_layer_idx", idx)
def get_active_setup(self, module_dict):
if getattr(self.config, "is_adaptable", False):
# First check current context before falling back to defined setup
context = AdapterSetup.get_context()
if context is not None:
adapter_setup = context.adapter_setup
else:
adapter_setup = self.config.adapters.active_setup
else:
adapter_setup = None
skip_adapters = adapter_setup is None or (
self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers
)
if not skip_adapters and (len(set(module_dict.keys()) & adapter_setup.flatten()) > 0):
return adapter_setup
else:
return None
def _store_gating_score(self, adapter_name, gating_score):
context = ForwardContext.get_context()
if context.output_adapter_gating_scores:
gating_cache = context.adapter_gating_scores
if self.layer_idx not in gating_cache[adapter_name]:
gating_cache[adapter_name][self.layer_idx] = {}
gating_score = gating_score.detach().squeeze().cpu().numpy()
if len(gating_score.shape) == 0:
gating_score = np.expand_dims(gating_score, axis=0)
cache_score = gating_cache[adapter_name][self.layer_idx].get(self.location_key, None)
if cache_score is not None:
gating_cache[adapter_name][self.layer_idx][self.location_key] = np.column_stack(
(cache_score, gating_score)
)
else:
gating_cache[adapter_name][self.layer_idx][self.location_key] = gating_score
def _store_fusion_attentions(self, fusion_name, attentions):
context = ForwardContext.get_context()
if context.output_adapter_fusion_attentions:
attention_cache = context.adapter_fusion_attentions
if self.layer_idx not in attention_cache[fusion_name]:
attention_cache[fusion_name][self.layer_idx] = {}
attention_cache[fusion_name][self.layer_idx][self.location_key] = attentions
@abstractmethod
def add_adapter(self, adapter_name: str, layer_idx: int):
raise NotImplementedError()
@abstractmethod
def delete_adapter(self, adapter_name: str):
raise NotImplementedError()
@abstractmethod
def add_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
def delete_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool):
raise NotImplementedError()
@abstractmethod
def get_adapter(self, adapter_name: str) -> nn.Module:
raise NotImplementedError()
class AdapterLayer(AdapterLayerBase, nn.Module):
def __init__(self, location_key: str, config):
super().__init__()
self.location_key = location_key
self.config = config
def _init_adapter_modules(self):
self.adapters = nn.ModuleDict(dict())
self.adapter_fusion_layer = nn.ModuleDict(dict())
def add_adapter(self, adapter_name: str, layer_idx: int):
self.layer_idx = layer_idx
adapter_config = self.config.adapters.match(
adapter_name,
config_type=AdapterConfig,
layer_idx=self.layer_idx,
location_key=self.location_key,
)
if adapter_config is not None:
reduction_factor = adapter_config["reduction_factor"]
if isinstance(reduction_factor, Mapping):
if str(self.layer_idx) in reduction_factor:
reduction_factor = reduction_factor[str(self.layer_idx)]
elif "default" in reduction_factor:
reduction_factor = reduction_factor["default"]
else:
raise KeyError(
"The given reduction factor mapping does not give a default value and does not specify each "
"reduction factor individually. You need to provide a default value like this: "
'{"1": 16, "default": 16}'
)
if adapter_config.is_parallel:
|
class AdapterLayerBase(ABC):
"""
Base class for all adaptation methods that require per-layer modules.
"""
@property
def layer_idx(self):
return getattr(self, "_layer_idx", -1)
@layer_idx.setter
def layer_idx(self, layer_idx):
idx = getattr(self, "_layer_idx", layer_idx)
assert idx == layer_idx
setattr(self, "_layer_idx", idx)
def get_active_setup(self, module_dict):
if getattr(self.config, "is_adaptable", False):
# First check current context before falling back to defined setup
context = AdapterSetup.get_context()
if context is not None:
adapter_setup = context.adapter_setup
else:
adapter_setup = self.config.adapters.active_setup
else:
adapter_setup = None
skip_adapters = adapter_setup is None or (
self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers
)
if not skip_adapters and (len(set(module_dict.keys()) & adapter_setup.flatten()) > 0):
return adapter_setup
else:
return None
def _store_gating_score(self, adapter_name, gating_score):
context = ForwardContext.get_context()
if context.output_adapter_gating_scores:
gating_cache = context.adapter_gating_scores
if self.layer_idx not in gating_cache[adapter_name]:
gating_cache[adapter_name][self.layer_idx] = {}
gating_score = gating_score.detach().squeeze().cpu().numpy()
if len(gating_score.shape) == 0:
gating_score = np.expand_dims(gating_score, axis=0)
cache_score = gating_cache[adapter_name][self.layer_idx].get(self.location_key, None)
if cache_score is not None:
gating_cache[adapter_name][self.layer_idx][self.location_key] = np.column_stack(
(cache_score, gating_score)
)
else:
gating_cache[adapter_name][self.layer_idx][self.location_key] = gating_score
def _store_fusion_attentions(self, fusion_name, attentions):
context = ForwardContext.get_context()
if context.output_adapter_fusion_attentions:
attention_cache = context.adapter_fusion_attentions
if self.layer_idx not in attention_cache[fusion_name]:
attention_cache[fusion_name][self.layer_idx] = {}
attention_cache[fusion_name][self.layer_idx][self.location_key] = attentions
@abstractmethod
def add_adapter(self, adapter_name: str, layer_idx: int):
raise NotImplementedError()
@abstractmethod
def delete_adapter(self, adapter_name: str):
raise NotImplementedError()
@abstractmethod
def add_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
def delete_fusion_layer(self, adapter_names: Union[List, str]):
raise NotImplementedError()
@abstractmethod
def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool):
raise NotImplementedError()
@abstractmethod
def get_adapter(self, adapter_name: str) -> nn.Module:
raise NotImplementedError()
class AdapterLayer(AdapterLayerBase, nn.Module):
def __init__(self, location_key: str, config):
super().__init__()
self.location_key = location_key
self.config = config
def _init_adapter_modules(self):
self.adapters = nn.ModuleDict(dict())
self.adapter_fusion_layer = nn.ModuleDict(dict())
def add_adapter(self, adapter_name: str, layer_idx: int):
self.layer_idx = layer_idx
adapter_config = self.config.adapters.match(
adapter_name,
config_type=AdapterConfig,
layer_idx=self.layer_idx,
location_key=self.location_key,
)
if adapter_config is not None:
reduction_factor = adapter_config["reduction_factor"]
if isinstance(reduction_factor, Mapping):
if str(self.layer_idx) in reduction_factor:
reduction_factor = reduction_factor[str(self.layer_idx)]
elif "default" in reduction_factor:
reduction_factor = reduction_factor["default"]
else:
raise KeyError(
"The given reduction factor mapping does not give a default value and does not specify each "
"reduction factor individually. You need to provide a default value like this: "
'{"1": 16, "default": 16}'
)
if adapter_config.is_parallel: | adapter_class = ParallelAdapter | 11 | 2023-10-18 18:05:54+00:00 | 12k |
justincui03/tesla | distill.py | [
{
"identifier": "augment",
"path": "utils.py",
"snippet": "def augment(images, dc_aug_param, device):\n # This can be sped up in the future.\n\n if dc_aug_param != None and dc_aug_param['strategy'] != 'none':\n scale = dc_aug_param['scale']\n crop = dc_aug_param['crop']\n rota... | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils
import wandb
import copy
import random
import warnings
from tqdm import tqdm
from utils import augment, get_dataset, get_network, get_eval_pool, evaluate_synset, get_time, DiffAugment, DiffAugmentList, ParamDiffAug
from reparam_module import ReparamModule
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from PIL import PngImagePlugin | 9,617 | image_save = args.zca_trans.inverse_transform(image_save)
image_save.cpu()
torch.save(image_save.cpu(), os.path.join(save_dir, "images_zca_{}.pt".format(it)))
upsampled = image_save
if args.dataset != "ImageNet":
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2)
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3)
grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True)
wandb.log({"Reconstructed_Images": wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it)
wandb.log({'Reconstructed_Pixels': wandb.Histogram(torch.nan_to_num(image_save.detach().cpu()))}, step=it)
for clip_val in [2.5]:
std = torch.std(image_save)
mean = torch.mean(image_save)
upsampled = torch.clip(image_save, min=mean - clip_val * std, max=mean + clip_val * std)
if args.dataset != "ImageNet":
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2)
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3)
grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True)
wandb.log({"Clipped_Reconstructed_Images/std_{}".format(clip_val): wandb.Image(
torch.nan_to_num(grid.detach().cpu()))}, step=it)
wandb.log({"Synthetic_LR": syn_lr.detach().cpu()}, step=it)
student_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model
student_net = ReparamModule(student_net)
if args.distributed:
student_net = torch.nn.DataParallel(student_net)
student_net.train()
if not args.random_trajectory:
if args.load_all:
expert_trajectory = buffer[np.random.randint(0, len(buffer))]
else:
expert_trajectory = buffer[expert_idx]
expert_idx += 1
if expert_idx == len(buffer):
expert_idx = 0
file_idx += 1
if file_idx == len(expert_files):
file_idx = 0
random.shuffle(expert_files)
print("loading file {}".format(expert_files[file_idx]))
if args.max_files != 1:
del buffer
buffer = torch.load(expert_files[file_idx])
if args.max_experts is not None:
buffer = buffer[:args.max_experts]
random.shuffle(buffer)
start_epoch = np.random.randint(0, args.max_start_epoch)
if not args.random_trajectory:
starting_params = expert_trajectory[start_epoch]
target_params = expert_trajectory[start_epoch+args.expert_epochs]
else:
starting_params = [p for p in student_net.parameters()]
target_params = [p for p in student_net.parameters()]
target_params = torch.cat([p.data.to(args.device).reshape(-1) for p in target_params], 0)
student_params = [torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0).requires_grad_(True)]
starting_params = torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0)
param_dist = torch.tensor(0.0).to(args.device)
param_dist += torch.nn.functional.mse_loss(starting_params, target_params, reduction="sum")
# produce soft labels for soft label assignment.
if args.teacher_label:
label_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model
label_net = ReparamModule(label_net)
label_net.eval()
# use the target param as the model param to get soft labels.
label_params = copy.deepcopy(target_params.detach()).requires_grad_(False)
batch_labels = []
SOFT_INIT_BATCH_SIZE = 50
if image_syn.shape[0] > SOFT_INIT_BATCH_SIZE and args.dataset == 'ImageNet':
for indices in torch.split(torch.tensor([i for i in range(0, image_syn.shape[0])], dtype=torch.long), SOFT_INIT_BATCH_SIZE):
batch_labels.append(label_net(image_syn[indices].detach().to(args.device), flat_param=label_params))
else:
label_syn = label_net(image_syn.detach().to(args.device), flat_param=label_params)
label_syn = torch.cat(batch_labels, dim=0)
label_syn = torch.nn.functional.softmax(label_syn)
del label_net, label_params
for _ in batch_labels:
del _
syn_images = image_syn
y_hat = label_syn.to(args.device)
syn_image_gradients = torch.zeros(syn_images.shape).to(args.device)
x_list = []
original_x_list = []
y_list = []
indices_chunks = []
gradient_sum = torch.zeros(student_params[-1].shape).to(args.device)
indices_chunks_copy = []
for _ in range(args.syn_steps):
if not indices_chunks:
indices = torch.randperm(len(syn_images))
indices_chunks = list(torch.split(indices, args.batch_syn))
these_indices = indices_chunks.pop()
indices_chunks_copy.append(these_indices)
x = syn_images[these_indices]
this_y = y_hat[these_indices]
original_x_list.append(x)
if args.dsa and (not args.no_aug):
|
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
if args.zca and args.texture:
raise AssertionError("Cannot use zca and texture together")
if args.texture and args.pix_init == "real":
print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.")
if args.max_experts is not None and args.max_files is not None:
args.total_experts = args.max_experts * args.max_files
print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled))
args.dsa = True if args.dsa == 'True' else False
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist()
channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv = get_dataset(args.dataset, args.data_path, args.batch_real, args=args)
model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model)
im_res = im_size[0]
args.im_size = im_size
accs_all_exps = dict() # record performances of all experiments
for key in model_eval_pool:
accs_all_exps[key] = []
data_save = []
if args.dsa:
# args.epoch_eval_train = 1000
args.dc_aug_param = None
args.dsa_param = ParamDiffAug()
dsa_params = args.dsa_param
if args.zca:
zca_trans = args.zca_trans
else:
zca_trans = None
wandb.init(sync_tensorboard=False,
project="DatasetDistillation",
job_type="CleanRepo",
config=args,
)
args = type('', (), {})()
for key in wandb.config._items:
setattr(args, key, wandb.config._items[key])
args.dsa_param = dsa_params
args.zca_trans = zca_trans
if args.batch_syn is None:
args.batch_syn = num_classes * args.ipc
args.distributed = torch.cuda.device_count() > 1
print('Hyper-parameters: \n', args.__dict__)
print('Evaluation model pool: ', model_eval_pool)
''' organize the real dataset '''
indices_class = [[] for c in range(num_classes)]
# Build label to index map
print("---------------Build label to index map--------------")
# For machines with limited RAM, it's impossible to load all ImageNet or even TinyImageNet into memory.
# Even if it's possible, it will take too long to process.
# Therefore we pregenerate an indices to image map and use this map to quickly random samples from ImageNet or TinyImageNet dataset.
if args.dataset == 'ImageNet':
indices_class = np.load('indices/imagenet_indices_class.npy', allow_pickle=True)
elif args.dataset == 'Tiny':
indices_class = np.load('indices/tiny_indices_class.npy', allow_pickle=True)
else:
for i, data in tqdm(enumerate(dst_train)):
indices_class[data[1]].append(i)
# for c in range(num_classes):
# print('class c = %d: %d real images'%(c, len(indices_class[c])))
def get_images(c, n): # get random n images from class c
idx_shuffle = np.random.permutation(indices_class[c])[:n]
subset = Subset(dst_train, idx_shuffle)
data_loader = DataLoader(subset, batch_size=n)
# only read the first batch which has n(IPC) number of images.
for data in data_loader:
return data[0].to("cpu")
''' initialize the synthetic data '''
label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9]
if args.texture:
image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0]*args.canvas_size, im_size[1]*args.canvas_size), dtype=torch.float)
else:
image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float)
syn_lr = torch.tensor(args.lr_teacher).to(args.device)
if args.pix_init == 'real':
print('initialize synthetic data from random real images')
for c in range(num_classes):
image_syn.data[c * args.ipc:(c + 1) * args.ipc] = get_images(c, args.ipc).detach().data
else:
print('initialize synthetic data from random noise')
''' training '''
image_syn = image_syn.detach().to(args.device).requires_grad_(True)
print(image_syn.shape)
syn_lr = syn_lr.detach().to(args.device).requires_grad_(True)
optimizer_img = torch.optim.SGD([image_syn], lr=args.lr_img, momentum=0.5)
optimizer_lr = torch.optim.SGD([syn_lr], lr=args.lr_lr, momentum=0.5)
optimizer_img.zero_grad()
optimizer_lr.zero_grad()
criterion = nn.CrossEntropyLoss().to(args.device)
print('%s training begins'%get_time())
expert_dir = os.path.join(args.buffer_path, args.dataset)
if args.dataset in ["CIFAR10", "CIFAR100"] and not args.zca:
expert_dir += "_NO_ZCA"
expert_dir = os.path.join(expert_dir, args.model)
print("Expert Dir: {}".format(expert_dir))
if not args.random_trajectory:
if args.load_all:
buffer = []
n = 0
while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
buffer = buffer + torch.load(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
n += 1
if n == 0:
raise AssertionError("No buffers detected at {}".format(expert_dir))
else:
expert_files = []
n = 0
while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))):
expert_files.append(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n)))
n += 1
if n == 0:
raise AssertionError("No buffers detected at {}".format(expert_dir))
file_idx = 0
expert_idx = 0
random.shuffle(expert_files)
if args.max_files is not None:
expert_files = expert_files[:args.max_files]
print("loading file {}".format(expert_files[file_idx]))
buffer = torch.load(expert_files[file_idx])
if args.max_experts is not None:
buffer = buffer[:args.max_experts]
random.shuffle(buffer)
best_acc = {m: 0 for m in model_eval_pool}
best_std = {m: 0 for m in model_eval_pool}
for it in range(0, args.Iteration+1):
save_this_it = False
# writer.add_scalar('Progress', it, it)
wandb.log({"Progress": it}, step=it)
''' Evaluate synthetic data '''
if it in eval_it_pool and args.eval_it > 0:
for model_eval in model_eval_pool:
print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it))
if args.dsa:
print('DSA augmentation strategy: \n', args.dsa_strategy)
print('DSA augmentation parameters: \n', args.dsa_param.__dict__)
else:
print('DC augmentation parameters: \n', args.dc_aug_param)
accs_test = []
accs_train = []
for it_eval in range(args.num_eval):
net_eval = get_network(model_eval, channel, num_classes, im_size).to(args.device) # get a random model
eval_labs = label_syn
with torch.no_grad():
image_save = image_syn
image_syn_eval, label_syn_eval = copy.deepcopy(image_save.detach()), copy.deepcopy(eval_labs.detach()) # avoid any unaware modification
args.lr_net = syn_lr.item()
_, acc_train, acc_test = evaluate_synset(it_eval, net_eval, image_syn_eval, label_syn_eval, testloader, args, texture=args.texture)
accs_test.append(acc_test)
accs_train.append(acc_train)
accs_test = np.array(accs_test)
accs_train = np.array(accs_train)
acc_test_mean = np.mean(accs_test)
acc_test_std = np.std(accs_test)
if acc_test_mean > best_acc[model_eval]:
best_acc[model_eval] = acc_test_mean
best_std[model_eval] = acc_test_std
save_this_it = True
print('Evaluate %d random %s, mean = %.4f std = %.4f\n-------------------------'%(len(accs_test), model_eval, acc_test_mean, acc_test_std))
wandb.log({'Accuracy/{}'.format(model_eval): acc_test_mean}, step=it)
wandb.log({'Max_Accuracy/{}'.format(model_eval): best_acc[model_eval]}, step=it)
wandb.log({'Std/{}'.format(model_eval): acc_test_std}, step=it)
wandb.log({'Max_Std/{}'.format(model_eval): best_std[model_eval]}, step=it)
if it in eval_it_pool and (save_this_it or it % 1000 == 0) and args.eval_it > 0:
with torch.no_grad():
image_save = image_syn.cuda()
save_dir = os.path.join(".", "logged_files", args.dataset, 'offline' if wandb.run.name is None else wandb.run.name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_save.cpu(), os.path.join(save_dir, "images_{}.pt".format(it)))
torch.save(label_syn.cpu(), os.path.join(save_dir, "labels_{}.pt".format(it)))
if save_this_it:
torch.save(image_save.cpu(), os.path.join(save_dir, "images_best.pt".format(it)))
torch.save(label_syn.cpu(), os.path.join(save_dir, "labels_best.pt".format(it)))
wandb.log({"Pixels": wandb.Histogram(torch.nan_to_num(image_syn.detach().cpu()))}, step=it)
if args.ipc < 50 or args.force_save:
upsampled = image_save
if args.dataset != "ImageNet":
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2)
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3)
grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True)
wandb.log({"Synthetic_Images": wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it)
wandb.log({'Synthetic_Pixels': wandb.Histogram(torch.nan_to_num(image_save.detach().cpu()))}, step=it)
for clip_val in [2.5]:
std = torch.std(image_save)
mean = torch.mean(image_save)
upsampled = torch.clip(image_save, min=mean-clip_val*std, max=mean+clip_val*std)
if args.dataset != "ImageNet":
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2)
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3)
grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True)
wandb.log({"Clipped_Synthetic_Images/std_{}".format(clip_val): wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it)
if args.zca:
image_save = image_save.to(args.device)
image_save = args.zca_trans.inverse_transform(image_save)
image_save.cpu()
torch.save(image_save.cpu(), os.path.join(save_dir, "images_zca_{}.pt".format(it)))
upsampled = image_save
if args.dataset != "ImageNet":
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2)
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3)
grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True)
wandb.log({"Reconstructed_Images": wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it)
wandb.log({'Reconstructed_Pixels': wandb.Histogram(torch.nan_to_num(image_save.detach().cpu()))}, step=it)
for clip_val in [2.5]:
std = torch.std(image_save)
mean = torch.mean(image_save)
upsampled = torch.clip(image_save, min=mean - clip_val * std, max=mean + clip_val * std)
if args.dataset != "ImageNet":
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2)
upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3)
grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True)
wandb.log({"Clipped_Reconstructed_Images/std_{}".format(clip_val): wandb.Image(
torch.nan_to_num(grid.detach().cpu()))}, step=it)
wandb.log({"Synthetic_LR": syn_lr.detach().cpu()}, step=it)
student_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model
student_net = ReparamModule(student_net)
if args.distributed:
student_net = torch.nn.DataParallel(student_net)
student_net.train()
if not args.random_trajectory:
if args.load_all:
expert_trajectory = buffer[np.random.randint(0, len(buffer))]
else:
expert_trajectory = buffer[expert_idx]
expert_idx += 1
if expert_idx == len(buffer):
expert_idx = 0
file_idx += 1
if file_idx == len(expert_files):
file_idx = 0
random.shuffle(expert_files)
print("loading file {}".format(expert_files[file_idx]))
if args.max_files != 1:
del buffer
buffer = torch.load(expert_files[file_idx])
if args.max_experts is not None:
buffer = buffer[:args.max_experts]
random.shuffle(buffer)
start_epoch = np.random.randint(0, args.max_start_epoch)
if not args.random_trajectory:
starting_params = expert_trajectory[start_epoch]
target_params = expert_trajectory[start_epoch+args.expert_epochs]
else:
starting_params = [p for p in student_net.parameters()]
target_params = [p for p in student_net.parameters()]
target_params = torch.cat([p.data.to(args.device).reshape(-1) for p in target_params], 0)
student_params = [torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0).requires_grad_(True)]
starting_params = torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0)
param_dist = torch.tensor(0.0).to(args.device)
param_dist += torch.nn.functional.mse_loss(starting_params, target_params, reduction="sum")
# produce soft labels for soft label assignment.
if args.teacher_label:
label_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model
label_net = ReparamModule(label_net)
label_net.eval()
# use the target param as the model param to get soft labels.
label_params = copy.deepcopy(target_params.detach()).requires_grad_(False)
batch_labels = []
SOFT_INIT_BATCH_SIZE = 50
if image_syn.shape[0] > SOFT_INIT_BATCH_SIZE and args.dataset == 'ImageNet':
for indices in torch.split(torch.tensor([i for i in range(0, image_syn.shape[0])], dtype=torch.long), SOFT_INIT_BATCH_SIZE):
batch_labels.append(label_net(image_syn[indices].detach().to(args.device), flat_param=label_params))
else:
label_syn = label_net(image_syn.detach().to(args.device), flat_param=label_params)
label_syn = torch.cat(batch_labels, dim=0)
label_syn = torch.nn.functional.softmax(label_syn)
del label_net, label_params
for _ in batch_labels:
del _
syn_images = image_syn
y_hat = label_syn.to(args.device)
syn_image_gradients = torch.zeros(syn_images.shape).to(args.device)
x_list = []
original_x_list = []
y_list = []
indices_chunks = []
gradient_sum = torch.zeros(student_params[-1].shape).to(args.device)
indices_chunks_copy = []
for _ in range(args.syn_steps):
if not indices_chunks:
indices = torch.randperm(len(syn_images))
indices_chunks = list(torch.split(indices, args.batch_syn))
these_indices = indices_chunks.pop()
indices_chunks_copy.append(these_indices)
x = syn_images[these_indices]
this_y = y_hat[these_indices]
original_x_list.append(x)
if args.dsa and (not args.no_aug): | x = DiffAugment(x, args.dsa_strategy, param=args.dsa_param) | 6 | 2023-10-17 23:11:36+00:00 | 12k |
upiterbarg/hihack | models/utils.py | [
{
"identifier": "CDGPT5",
"path": "models/cdgpt5.py",
"snippet": "class CDGPT5(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(CDGPT5, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flag... | import omegaconf
import os
import pathlib
import pdb
import sys
import torch
from .cdgpt5 import CDGPT5
from .cleaved_hierarchical_policy import CleavedHierarchicalPolicy
from .flat_transformer import FlatTransformer
from .hierarchical_lstm import HierarchicalLSTM
from .hierarchical_transformer_lstm import HierarchicalTransformerLSTM
from .transformer_lstm import TransformerLSTM
from nle.env.base import DUNGEON_SHAPE
from omegaconf import OmegaConf
from tasks import ENVS | 10,749 |
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))
MODELS = [
CDGPT5,
HierarchicalLSTM,
HierarchicalTransformerLSTM,
TransformerLSTM,
|
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))
MODELS = [
CDGPT5,
HierarchicalLSTM,
HierarchicalTransformerLSTM,
TransformerLSTM, | FlatTransformer | 2 | 2023-10-23 15:44:32+00:00 | 12k |
avilliai/Bert_Vits2_Sever | train_ms.py | [
{
"identifier": "TextAudioSpeakerLoader",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from... | import os
import json
import argparse
import itertools
import math
import torch
import shutil
import torch.multiprocessing as mp
import torch.distributed as dist
import logging
import commons
import utils
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
TextAudioSpeakerLoader,
TextAudioSpeakerCollate,
DistributedBucketSampler
)
from models import (
SynthesizerTrn,
MultiPeriodDiscriminator,
DurationDiscriminator,
)
from losses import (
generator_loss,
discriminator_loss,
feature_loss,
kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols | 8,152 | logging.getLogger('numba').setLevel(logging.WARNING)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision('medium')
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '65280'
hps = utils.get_hparams()
if not hps.cont:
shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
def run(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
| logging.getLogger('numba').setLevel(logging.WARNING)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision('medium')
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '65280'
hps = utils.get_hparams()
if not hps.cont:
shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
def run(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
| train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) | 0 | 2023-10-23 08:24:12+00:00 | 12k |
t-ega/whatsapp-cloud-sdk | whatsapp_cloud_sdk/bot.py | [
{
"identifier": "CustomHTTPError",
"path": "whatsapp_cloud_sdk/_exceptions/http_error.py",
"snippet": "class CustomHTTPError(Exception):\n \"\"\"\n Represents a custom HTTP error.\n\n This exception class is used to raise custom HTTP errors with\n specific status codes and response text.\n ... | from typing import Optional, List, Dict
from unicodedata import decimal
from whatsapp_cloud_sdk._exceptions.http_error import CustomHTTPError
from whatsapp_cloud_sdk._base_api import _BaseApi
from whatsapp_cloud_sdk._files.contact import Contact
from whatsapp_cloud_sdk._utils.json_serializer import MyEncoder
from whatsapp_cloud_sdk._validators.messages import (
TextMessage,
ButtonMessage,
ButtonContents,
LinkMessage,
LocationMessage,
)
from whatsapp_cloud_sdk._formaters.message_formatter import MessageFormatter, LinkTypes
import json
import requests | 7,476 | longitude=message.longitude,
latitude=latitude,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_contact(
self,
contacts: List[Contact],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a contact to a recipient.
Args:
contacts (list): A list of contact details.Each contact detail a list of contact objects.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not isinstance(contacts, list):
raise TypeError("Contacts must be a list")
for i, contact in contacts:
if not isinstance(contact, Contact):
raise AttributeError(
f"Contact {i} must be of type {type(Contact)}. Got {type(type(contact))} instead."
)
payload = formatter.format_contact_message(
contacts=contacts, to=recipient_number, message_id=message_id
)
return await self.__send(data=payload)
async def send_sticker_with_url(
self,
link: str,
recipient_number: str,
message_id: Optional[str],
):
"""
Send a sticker by URL to a recipient.
Args:
link (str): The URL of the sticker.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
payload = formatter.format_sticker_message_by_url(
link=link, to=recipient_number, message_id=message_id
)
return await self.__send(data=payload)
async def mark_message_as_read(self, message_id: str):
"""
Mark a message as read.
Args:
message_id (str): The ID of the message to mark as read.
Raises:
ValueError: If message_id is not provided.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not message_id:
raise ValueError("A message Id is required")
payload = formatter.mark_message_as_read(message_id=message_id)
return await self.__send(data=payload)
async def __send(
self,
data: dict,
) -> dict:
"""
Send data to the WhatsApp API.
Args:
data (dict): The data to send to the WhatsApp API.
Raises:
AttributeError: If there is no data to send.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not data:
raise AttributeError("No data to send")
# Convert message_body to JSON
json_data = json.dumps(data, cls=MyEncoder)
timeout_secs = 10
response = requests.post(
self.WA_URL, headers=self.HEADERS, data=json_data, timeout=timeout_secs
)
try:
response.raise_for_status()
except requests.HTTPError as exc:
# Re raise the error with the text gotten
| """This module Represents a WhatsApp bot for communication with the WhatsApp API."""
formatter = MessageFormatter()
class Bot(_BaseApi):
# pylint: disable=line-too-long
"""
Represents a WhatsApp bot for communication with the WhatsApp API.
This class inherits from the `BaseApi` class and provides methods for sending various types of
messages, marking messages as read, and handling communication with the WhatsApp API.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Inherits attributes from the `BaseApi` class, such as `WA_URL` and `HEADERS`.
Attributes:
Inherits attributes from the `BaseApi` class.
Methods:
- `send_text(text: str, recipient_number: str, message_id: str = None, preview_url: bool = False)`:
Send a text message to a recipient.
- `send_text_with_buttons(text: str, buttons: list, recipient_number: str)`:
Send a text message with buttons to a recipient.
- `send_reply_with_reaction(message_id: str, emoji: str, recipient_number: str)`:
Send a reaction to a message.
- `send_image_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str])`:
Send an image by URL.
- `send_audio_by_url(link: str, caption: Optional[str], recipient_number: str)`:
Send audio by URL.
- `send_document_by_url(link: str, caption: Optional[str], recipient_number: str)`:
Send a document by URL.
- `send_video_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str] = None)
`:
Send a video by URL.
- `send_location(latitude: decimal, longitude: int, name: str, address: str, recipient_number: str)`:
Send a location.
- `send_contact(contact: list, recipient_number: str)`:
Send a contact.
- `send_sticker_with_url(link: str, recipient_number: str)`:
Send a sticker by URL.
- `mark_message_as_read(message_id: str)`:
Mark a message as read.
- `__send(data: dict, method: Optional[str] = "POST") -> dict`:
Send data to the WhatsApp API.
Usage Example:
```
python
from your_library import Bot
# Initialize the bot.
bot = Bot(cloud_api_access_token="your_access_token", wa_phone_number_id="your_phone_number_id", version="v17.0")
# Use bot methods to interact with the WhatsApp API
bot.send_text("Hello, world!", "recipient_number")
```
"""
def __init__(
self,
cloud_api_access_token: str = None,
wa_phone_number_id: str = None,
version: str = None,
):
"""
Initialize a Bot instance for WhatsApp API communication.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Inherits attributes from the `BaseApi` class.
"""
super().__init__(
cloud_api_access_token=cloud_api_access_token,
wa_phone_number_id=wa_phone_number_id,
version=version,
)
async def send_text(
self,
text: str,
recipient_number: str,
message_id: str = None,
preview_url: bool = False,
):
"""
Send a text message to a recipient.
Args:
text (str): The text of the message.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): The ID of the message if it is a reply to a message (optional).
preview_url (bool): Enable or disable URL preview (default is False).
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
message = TextMessage(
text=text, recipient_number=recipient_number, message_id=message_id
)
payload = formatter.format_text_message(
to=message.recipient_number,
body=message.text,
message_id=message_id,
preview_url=preview_url,
)
return await self.__send(data=payload)
async def send_text_with_buttons(
self,
text: str,
buttons: List[Dict[str, str]],
recipient_number: str,
message_id: Optional[str],
):
"""
Send a text message with buttons to a recipient.
Args:
text (str): The text of the message.
buttons (list): List of buttons, where each button is a dictionary with the following keys:
- 'title' (str): The title or label of the button.
- 'id' (optional, str): An optional id for the button.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not isinstance(buttons, list):
raise TypeError("Buttons must be a list of dict object")
buttons_content = [ButtonContents(**b) for b in buttons]
message = ButtonMessage(
text=text, recipient_number=recipient_number, buttons=buttons_content
)
payload = formatter.format_button_message(
to=recipient_number,
text=message.text,
buttons=message.buttons,
message_id=message_id,
)
return await self.__send(data=payload)
# pylint: disable=fixme
# TODO: Add input validation for all bot methods
async def send_reaction_message(
self, message_id: Optional[str], emoji, recipient_number: str
):
"""
Send a reaction message.
Args:
message_id (str, optional): An optional message ID if it is a reply to a message.
emoji (str): The reaction emoji to send.
recipient_number (str): The recipient's WhatsApp phone number.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
payload = formatter.format_reply_with_reaction(
to=recipient_number, message_id=message_id, emoji=emoji
)
return await self.__send(data=payload)
async def send_image_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str],
):
"""
Send an image by URL to a recipient.
Args:
link (str): The URL of the image.
caption (str, optional): An optional caption for the image.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
message = LinkMessage(link=link, caption=caption)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.IMAGE,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_audio_by_url(
self,
link: str,
recipient_number: str,
message_id: Optional[str],
):
"""
Send an audio file by URL to a recipient.
Args:
link (str): The URL of the audio file.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
message = LinkMessage(link=link)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.AUDIO,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_document_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a document by URL to a recipient.
Args:
link (str): The URL of the document.
caption (str, optional): An optional caption for the document.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
message = LinkMessage(
link=link,
caption=caption,
)
payload = formatter.format_send_document_by_url(
to=recipient_number,
document_link=message.link,
caption=message.caption,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_video_by_url(
self,
link: str,
caption: Optional[str],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a video by URL to a recipient.
Args:
link (str): The URL of the video.
caption (str, optional): An optional caption for the video.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
message = LinkMessage(link=link, caption=caption)
payload = formatter.format_link_message(
to=recipient_number,
link=message.link,
m_type=LinkTypes.VIDEO,
caption=message.caption,
message_id=message_id,
)
return await self.__send(data=payload)
# pylint: disable=too-many-arguments
async def send_location(
self,
latitude: decimal,
longitude: int,
name: str,
address: str,
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a location to a recipient.
Args:
latitude (decimal): The latitude of the location.
longitude (int): The longitude of the location.
name (str): The name of the location.
address (str): The address of the location.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
message = LocationMessage(longitude=longitude, name=name, address=address)
payload = formatter.format_location_message(
to=recipient_number,
name=message.name,
address=message.address,
longitude=message.longitude,
latitude=latitude,
message_id=message_id,
)
return await self.__send(data=payload)
async def send_contact(
self,
contacts: List[Contact],
recipient_number: str,
message_id: Optional[str] = None,
):
"""
Send a contact to a recipient.
Args:
contacts (list): A list of contact details.Each contact detail a list of contact objects.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not isinstance(contacts, list):
raise TypeError("Contacts must be a list")
for i, contact in contacts:
if not isinstance(contact, Contact):
raise AttributeError(
f"Contact {i} must be of type {type(Contact)}. Got {type(type(contact))} instead."
)
payload = formatter.format_contact_message(
contacts=contacts, to=recipient_number, message_id=message_id
)
return await self.__send(data=payload)
async def send_sticker_with_url(
self,
link: str,
recipient_number: str,
message_id: Optional[str],
):
"""
Send a sticker by URL to a recipient.
Args:
link (str): The URL of the sticker.
recipient_number (str): The recipient's WhatsApp phone number.
message_id (str, optional): An optional message ID if it is a reply to a message.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
payload = formatter.format_sticker_message_by_url(
link=link, to=recipient_number, message_id=message_id
)
return await self.__send(data=payload)
async def mark_message_as_read(self, message_id: str):
"""
Mark a message as read.
Args:
message_id (str): The ID of the message to mark as read.
Raises:
ValueError: If message_id is not provided.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not message_id:
raise ValueError("A message Id is required")
payload = formatter.mark_message_as_read(message_id=message_id)
return await self.__send(data=payload)
async def __send(
self,
data: dict,
) -> dict:
"""
Send data to the WhatsApp API.
Args:
data (dict): The data to send to the WhatsApp API.
Raises:
AttributeError: If there is no data to send.
Returns:
Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain
The response from the WhatsApp API.
"""
if not data:
raise AttributeError("No data to send")
# Convert message_body to JSON
json_data = json.dumps(data, cls=MyEncoder)
timeout_secs = 10
response = requests.post(
self.WA_URL, headers=self.HEADERS, data=json_data, timeout=timeout_secs
)
try:
response.raise_for_status()
except requests.HTTPError as exc:
# Re raise the error with the text gotten | raise CustomHTTPError( | 0 | 2023-10-15 21:12:45+00:00 | 12k |
caglarkucuk/earthformer-satellite-to-radar | ef-sat2rad/earthformer/datasets/sevir/sevir_torch_wrap.py | [
{
"identifier": "cfg",
"path": "ef-sat2rad/earthformer/config.py",
"snippet": "_CURR_DIR = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))"
},
{
"identifier": "SEVIRDataLoader",
"path": "ef-sat2rad/earthformer/datasets/sevir/sevir_dataloader.py",
"snippet": "class SEVIRData... | import os
import numpy as np
import datetime
import pandas as pd
import torch
import random
import numpy as np
import cv2
import torch
import torchvision
import h5py
from typing import Union, Dict, Sequence, Tuple, List
from torch.utils.data import Dataset as TorchDataset, DataLoader
from pytorch_lightning import LightningDataModule
from ...config import cfg
from .sevir_dataloader import SEVIRDataLoader
from torch.utils.data import Dataset
from torch.utils.data import DataLoader | 10,012 |
def __init__(self,
seq_len: int = 25,
raw_seq_len: int = 49,
sample_mode: str = "sequent",
stride: int = 12,
batch_size: int = 1,
layout: str = "NHWT",
num_shard: int = 1,
rank: int = 0,
split_mode: str = "uneven",
sevir_catalog: Union[str, pd.DataFrame] = None,
sevir_data_dir: str = None,
start_date: datetime.datetime = None,
end_date: datetime.datetime = None,
datetime_filter = None,
catalog_filter = "default",
shuffle: bool = False,
shuffle_seed: int = 1,
output_type = np.float32,
preprocess: bool = True,
rescale_method: str = "01",
verbose: bool = False):
super(SEVIRTorchDataset, self).__init__()
self.layout = layout
self.sevir_dataloader = SEVIRDataLoader(
data_types=["vil", ],
seq_len=seq_len,
raw_seq_len=raw_seq_len,
sample_mode=sample_mode,
stride=stride,
batch_size=batch_size,
layout=layout,
num_shard=num_shard,
rank=rank,
split_mode=split_mode,
sevir_catalog=sevir_catalog,
sevir_data_dir=sevir_data_dir,
start_date=start_date,
end_date=end_date,
datetime_filter=datetime_filter,
catalog_filter=catalog_filter,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
output_type=output_type,
preprocess=preprocess,
rescale_method=rescale_method,
downsample_dict=None,
verbose=verbose)
def __getitem__(self, index):
data_dict = self.sevir_dataloader._idx_sample(index=index)
return data_dict
def __len__(self):
return self.sevir_dataloader.__len__()
def collate_fn(self, data_dict_list):
r"""
Parameters
----------
data_dict_list: list[Dict[str, torch.Tensor]]
Returns
-------
merged_data: Dict[str, torch.Tensor]
batch_size = len(data_dict_list) * data_dict["key"].batch_size
"""
batch_dim = self.layout.find('N')
data_list_dict = {
key: [data_dict[key]
for data_dict in data_dict_list]
for key in data_dict_list[0]}
# TODO: key "mask" is not handled. Temporally fine since this func is not used
data_list_dict.pop("mask", None)
merged_dict = {
key: torch.cat(data_list,
dim=batch_dim)
for key, data_list in data_list_dict.items()}
merged_dict["mask"] = None
return merged_dict
def get_torch_dataloader(self,
outer_batch_size=1,
collate_fn=None,
num_workers=1):
# TODO: num_workers > 1
r"""
We set the batch_size in Dataset by default, so outer_batch_size should be 1.
In this case, not using `collate_fn` can save time.
"""
if outer_batch_size == 1:
collate_fn = lambda x:x[0]
else:
if collate_fn is None:
collate_fn = self.collate_fn
dataloader = DataLoader(
dataset=self,
batch_size=outer_batch_size,
collate_fn=collate_fn,
num_workers=num_workers)
return dataloader
def check_aws():
r"""
Check if aws cli is installed.
"""
if os.system("which aws") != 0:
raise RuntimeError("AWS CLI is not installed! Please install it first. See https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html")
def download_SEVIR(save_dir=None):
r"""
Downloaded dataset is saved in save_dir/sevir
"""
check_aws()
if save_dir is None:
|
class SEVIRTorchDataset(TorchDataset):
    """PyTorch ``Dataset`` wrapper around ``SEVIRDataLoader`` for the SEVIR
    weather dataset, fixed to the ``"vil"`` modality.

    Note that batching is done *inside* ``SEVIRDataLoader`` (``batch_size``
    argument below), so each item yielded here is already a batch; the outer
    ``DataLoader`` built by :meth:`get_torch_dataloader` therefore normally
    uses ``outer_batch_size=1``.
    """
    def __init__(self,
                 seq_len: int = 25,
                 raw_seq_len: int = 49,
                 sample_mode: str = "sequent",
                 stride: int = 12,
                 batch_size: int = 1,
                 layout: str = "NHWT",
                 num_shard: int = 1,
                 rank: int = 0,
                 split_mode: str = "uneven",
                 sevir_catalog: Union[str, pd.DataFrame] = None,
                 sevir_data_dir: str = None,
                 start_date: datetime.datetime = None,
                 end_date: datetime.datetime = None,
                 datetime_filter = None,
                 catalog_filter = "default",
                 shuffle: bool = False,
                 shuffle_seed: int = 1,
                 output_type = np.float32,
                 preprocess: bool = True,
                 rescale_method: str = "01",
                 verbose: bool = False):
        super(SEVIRTorchDataset, self).__init__()
        # Remember the axis layout (e.g. "NHWT") so collate_fn can find the
        # batch dimension later.
        self.layout = layout
        # All sampling/sharding/preprocessing work is delegated to the
        # project's SEVIRDataLoader; this class only adapts it to the
        # torch Dataset protocol.
        self.sevir_dataloader = SEVIRDataLoader(
            data_types=["vil", ],
            seq_len=seq_len,
            raw_seq_len=raw_seq_len,
            sample_mode=sample_mode,
            stride=stride,
            batch_size=batch_size,
            layout=layout,
            num_shard=num_shard,
            rank=rank,
            split_mode=split_mode,
            sevir_catalog=sevir_catalog,
            sevir_data_dir=sevir_data_dir,
            start_date=start_date,
            end_date=end_date,
            datetime_filter=datetime_filter,
            catalog_filter=catalog_filter,
            shuffle=shuffle,
            shuffle_seed=shuffle_seed,
            output_type=output_type,
            preprocess=preprocess,
            rescale_method=rescale_method,
            downsample_dict=None,
            verbose=verbose)
    def __getitem__(self, index):
        """Return the (already batched) sample dict at ``index``."""
        data_dict = self.sevir_dataloader._idx_sample(index=index)
        return data_dict
    def __len__(self):
        # Length is defined by the inner loader (number of batches, not
        # number of raw events).
        return self.sevir_dataloader.__len__()
    def collate_fn(self, data_dict_list):
        r"""Merge a list of per-item batch dicts along the batch axis.

        Parameters
        ----------
        data_dict_list:	list[Dict[str, torch.Tensor]]

        Returns
        -------
        merged_data:	Dict[str, torch.Tensor]
            batch_size = len(data_dict_list) * data_dict["key"].batch_size
        """
        # Position of the batch axis ('N') in the configured layout.
        batch_dim = self.layout.find('N')
        data_list_dict = {
            key: [data_dict[key]
                  for data_dict in data_dict_list]
            for key in data_dict_list[0]}
        # TODO: key "mask" is not handled. Temporally fine since this func is not used
        data_list_dict.pop("mask", None)
        merged_dict = {
            key: torch.cat(data_list,
                           dim=batch_dim)
            for key, data_list in data_list_dict.items()}
        merged_dict["mask"] = None
        return merged_dict
    def get_torch_dataloader(self,
                             outer_batch_size=1,
                             collate_fn=None,
                             num_workers=1):
        # TODO: num_workers > 1
        r"""Build a torch ``DataLoader`` over this dataset.

        We set the batch_size in Dataset by default, so outer_batch_size should be 1.
        In this case, not using `collate_fn` can save time.
        """
        if outer_batch_size == 1:
            # Items are already batches; just unwrap the singleton list.
            collate_fn = lambda x:x[0]
        else:
            if collate_fn is None:
                collate_fn = self.collate_fn
        dataloader = DataLoader(
            dataset=self,
            batch_size=outer_batch_size,
            collate_fn=collate_fn,
            num_workers=num_workers)
        return dataloader
def check_aws():
    r"""Verify that the AWS CLI executable is available.

    Looks the ``aws`` executable up on ``PATH`` with :func:`shutil.which`
    instead of shelling out via ``os.system("which aws")`` — this avoids
    spawning a subshell, works on platforms without a ``which`` binary,
    and does not print to the console.

    Raises
    ------
    RuntimeError
        If the ``aws`` executable cannot be found.
    """
    import shutil
    if shutil.which("aws") is None:
        raise RuntimeError("AWS CLI is not installed! Please install it first. "
                           "See https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html")
def download_SEVIR(save_dir=None):
r"""
Downloaded dataset is saved in save_dir/sevir
"""
check_aws()
if save_dir is None: | save_dir = cfg.datasets_dir | 0 | 2023-10-23 11:45:50+00:00 | 12k |
DTennant/GPC | data/get_datasets.py | [
{
"identifier": "MergedDataset",
"path": "data/data_utils.py",
"snippet": "class MergedDataset(Dataset):\n\n \"\"\"\n Takes two datasets (labelled_dataset, unlabelled_dataset) and merges them\n Allows you to iterate over them in parallel\n \"\"\"\n\n def __init__(self, labelled_dataset, u... | from data.data_utils import MergedDataset
from data.cifar import get_cifar_10_datasets, get_cifar_100_datasets, get_cifar_100_ucd_datasets
from data.herbarium_19 import get_herbarium_datasets
from data.stanford_cars import get_scars_datasets
from data.imagenet import get_imagenet_100_datasets, get_imagenet_ucd_100_datasets
from data.cub import get_cub_datasets, get_cub_universal_datasets
from data.fgvc_aircraft import get_aircraft_datasets
from data.inat_mini import get_inat_universal_datasets
from data.domainnet import get_domainnet_universal_datasets
from data.color_symbol import get_color_symbol_universal_datasets
from data.cifar import subsample_classes as subsample_dataset_cifar
from data.herbarium_19 import subsample_classes as subsample_dataset_herb
from data.stanford_cars import subsample_classes as subsample_dataset_scars
from data.imagenet import subsample_classes as subsample_dataset_imagenet
from data.cub import subsample_classes as subsample_dataset_cub
from data.fgvc_aircraft import subsample_classes as subsample_dataset_air
from copy import deepcopy
from config import osr_split_dir
import pickle
import os | 8,111 |
sub_sample_class_funcs = {
'cifar10': subsample_dataset_cifar,
'cifar100': subsample_dataset_cifar,
'imagenet_100': subsample_dataset_imagenet,
'herbarium_19': subsample_dataset_herb,
|
sub_sample_class_funcs = {
'cifar10': subsample_dataset_cifar,
'cifar100': subsample_dataset_cifar,
'imagenet_100': subsample_dataset_imagenet,
'herbarium_19': subsample_dataset_herb, | 'cub': subsample_dataset_cub, | 10 | 2023-10-23 18:23:22+00:00 | 12k |
nju-websoft/SCR | main.py | [
{
"identifier": "reset_id",
"path": "framework/utils.py",
"snippet": "def reset_id(labels, new_id):\n res = []\n for index in range(len(labels)):\n res.append(new_id[int(labels[index])])\n return torch.tensor(res)"
},
{
"identifier": "get_reset",
"path": "framework/utils.py",... | import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import math
import warnings
from framework.utils import reset_id, get_reset, trigger_combine_event, unpack_batch
from framework.optimization import BertAdam, AdamW
from argparse import ArgumentParser
from model.trigger_encoder import triggerEncoder
from model.argument_detection import argumentDetection
from model.classifier import classifier
from model.entity_detection import entityDetection
from framework.config import Config
from framework.dataloader import *
from transformers import logging
from sklearn.cluster import KMeans | 8,496 | gold = copy.deepcopy(gold[0])
sentence = ''.join(sentence) + str(trig)
if sentence in gold_args:
print(gold_args[sentence])
print(gold)
assert(0)
gold_args[sentence] = gold
label_num += len(gold)
for step, (input_ids, input_masks, in_sent, segment_ids, sentence, trigger, ner) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner)
for i in range(len(in_sent)):
sent = copy.deepcopy(sentence[i])
tr = copy.deepcopy(trigger[i])
tr = sampler.index2vocab[tr]
sent = ''.join(sent) + str(tr)
new_logits = logits[i]
seen_args = copy.deepcopy(metadata[tr])
seen_args += [0]
pred_roles = []
if new_logits == None:
continue
for index, value in enumerate(new_logits):
logi = value[seen_args]
max_value, pred_role = torch.max(logi, dim = 0)
start, end = ner[i][index]
one_pred = (start, end, seen_args[int(pred_role)])
if seen_args[int(pred_role)] != 0:
pred_roles.append(one_pred)
if sent in gold_args:
one_gold_args = copy.deepcopy(gold_args[sent])
pred_num += len(pred_roles)
for preds in pred_roles:
if preds in one_gold_args:
while(preds in one_gold_args):
correct_num += 1
one_gold_args.remove(preds)
else:
pred_num += len(pred_roles)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
def select_argu_data(config, argument_detection, relation_dataset,new_id, event_mention):
    """Pick up to ``config.memory_size`` replay exemplars for arguments.

    Every sample is encoded with ``argument_detection.get_feature``; the
    features are clustered with KMeans and the sample nearest to each
    centroid is kept.  If the dataset is already small enough, all of it
    is returned unchanged.
    """
    loader = get_ACEArgData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
    argument_detection.eval()
    encoded = []
    for step, batch in enumerate(loader):
        (sentence, input_ids, input_masks, in_sent, segment_ids,
         args, args_offset, gold_args, ner, trigger) = batch
        # Re-stack the per-sample tensors into a single CUDA batch tensor.
        input_ids = torch.tensor(np.array([t.cpu().detach().numpy() for t in input_ids])).cuda()
        input_masks = torch.tensor(np.array([t.cpu().detach().numpy() for t in input_masks])).cuda()
        segment_ids = torch.tensor(np.array([t.cpu().detach().numpy() for t in segment_ids])).cuda()
        with torch.no_grad():
            encoded.append(argument_detection.get_feature(input_ids, segment_ids, input_masks).cpu())
    encoded = np.concatenate(encoded)
    n_keep = min(config.memory_size, len(relation_dataset))
    if n_keep == len(relation_dataset):
        # Nothing to prune — keep every sample.
        return list(relation_dataset)
    # Distance of each sample to each of the n_keep centroids.
    centroid_dist = KMeans(n_clusters = n_keep, random_state = 0).fit_transform(encoded)
    return [relation_dataset[np.argmin(centroid_dist[:, c])] for c in range(n_keep)]
def main():
    """Entry point: parse the config file and run the incremental
    event-extraction experiment for ``config.total_round`` rounds,
    re-seeding every round so runs are reproducible yet distinct."""
    # load config
    parser = ArgumentParser()
    parser.add_argument('--config', default='./config/ace.ini')
    args = parser.parse_args()
    config = Config(args.config)
    # set train param
    config.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps)
    triger_result_total, trigger_result_cur, argument_result_total, argument_result_cur = [], [], [], []
    # run config.total_round independent rounds and average the results
    for i in range(config.total_round):
        print(f"Now is round {i}")
        # bump the seed per round so each round uses a different but
        # reproducible random state
        config.seed += 100
        random.seed(config.seed)
        np.random.seed(config.seed)
        torch.manual_seed(config.seed)
        # now is trigger detection task
        sampler = ACETriDataloder(config, i)
        trigger_one_round_res = []
        argument_one_round_res = []
        # trigger memory space
        trigger_memorized_samples = {}
        # argument memory space
        argument_memorized_samples = {}
        # init trigger encode model
        entity_detection = entityDetection(config).to(config.device)
# Silence transformers' verbose logging and all Python warnings so the
# per-epoch progress printed during training stays readable.
logging.set_verbosity_warning()
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label):
    """Evaluate trigger detection and return micro F1 (0 on degenerate counts).

    Gold and predicted per-token labels are first collapsed so that runs of
    the same label on adjacent offsets count as ONE trigger mention.  When
    ``config.lttest`` is set, only labels in ``ltlabel`` (long-tail subset)
    are scored.  With ``save=True``, one record per distinct predicted event
    type per sentence is dumped to ``config.trigger_pred_file`` for the
    downstream entity/argument pipeline.
    """
    eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
    trigger_encoder.eval()
    trigger_classifier.eval()
    pred_num = 0
    correct_num = 0
    label_num = 0
    pred_res = []
    for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
        sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
        with torch.no_grad():
            feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
            #feature = torch.stack([x.to(device) for x in feature],dim=0)
            logits = trigger_classifier(feature, None, None)
        new_logits = logits
        for index, value in enumerate(in_sent):
            evetype = []
            pred_first = True
            # Boolean mask of positions that belong to the actual sentence.
            value = value == 1
            # Offsets and labels of the gold (non-zero) trigger tokens.
            gold_offset = torch.nonzero(labels[index][value]).squeeze(dim = 1)
            gold_label = torch.gather(labels[index][value], dim = 0, index = gold_offset)
            assert(len(gold_label) != 0)
            gold_label = [int(val) for val in gold_label]
            gold_offset = [int(val) for val in gold_offset]
            # Merge adjacent same-label gold tokens into single mentions.
            new_gold_label = []
            i = 0
            while i < len(gold_label):
                if i+1 >= len(gold_label):
                    if config.lttest and id2label[gold_label[i]] not in ltlabel:
                        break
                    else:
                        new_gold_label.append(gold_label[i])
                        break
                while gold_label[i] == gold_label[i+1] and gold_offset[i]+1 == gold_offset[i+1]:
                    i += 1
                    if i+1 >= len(gold_label):
                        break
                if config.lttest == False or id2label[gold_label[i]] in ltlabel:
                    new_gold_label.append(gold_label[i])
                i+=1
            gold_label = new_gold_label
            label_num += len(gold_label)
            # Per-token argmax prediction over the sentence positions.
            res = new_logits[index][value,:]
            max_value, pred_tri_each_word = torch.max(res, 1)
            pred_trigger = 0
            offset = 0
            pred_offset, pred_label = [], []
            for offset, trigger in enumerate(pred_tri_each_word):
                if trigger!=0:
                    if config.lttest == False or id2label[int(trigger)] in ltlabel:
                        pred_offset.append(offset)
                        pred_label.append(trigger)
            # Same adjacency merge for the predicted tokens.
            new_pred_label = []
            i = 0
            while i < len(pred_label):
                if i+1 >= len(pred_label):
                    new_pred_label.append(pred_label[i])
                    break
                while pred_label[i] == pred_label[i+1] and pred_offset[i]+1 == pred_offset[i+1]:
                    i += 1
                    if i+1 >= len(pred_label):
                        break
                new_pred_label.append(pred_label[i])
                i+=1
            new_pred_label = [int(val) for val in new_pred_label]
            pred_num += len(new_pred_label)
            for pred_trigger in new_pred_label:
                if save:
                    # Record each distinct predicted event type once per
                    # sentence for the downstream prediction pipeline.
                    if id2label[pred_trigger] not in evetype:
                        evetype.append(id2label[pred_trigger])
                        onesamp = {}
                        onesamp['sentence'] = sentence[index]
                        onesamp['trigger'] = id2label[pred_trigger]
                        onesamp['s_start'] = 0
                        pred_res.append(onesamp)
                # A correct hit consumes one gold mention (multiset match).
                if pred_trigger in gold_label:
                    correct_num += 1
                    gold_label.remove(pred_trigger)
    if pred_num == 0 or label_num == 0 or correct_num == 0:
        return 0
    pred_c = 100.0*correct_num/pred_num
    recall_c = 100.0*correct_num/label_num
    f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
    if save:
        f = open(config.trigger_pred_file, 'w')
        json.dump(pred_res, f)
        f.close()
    return f1_c
def train_simple_trigger(trigger_encoder, trigger_classifier, tr_data, config, new_id):
    """Plain supervised warm-up of the trigger encoder/classifier for 5 epochs.

    Unlike :func:`train_trigger`, there is no early stopping, distillation or
    checkpointing here.  Both modules are left in ``train()`` mode on return.

    Parameters
    ----------
    trigger_encoder, trigger_classifier : the modules being trained.
    tr_data : training samples consumed by ``get_ACETriData_loader``.
    config : global ``Config`` (learning rates, gradient accumulation, ...).
    new_id : label-id remapping forwarded to ``unpack_batch``.
    """
    train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True)
    trigger_encoder.train()
    trigger_classifier.train()
    # BERT-style parameter grouping: drop the unused pooler, and exempt
    # biases/LayerNorm weights from weight decay.
    param_optimizer_1 = list(trigger_encoder.named_parameters())
    param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
    param_optimizer_2 = list(trigger_classifier.named_parameters())
    param_optimizer_2 = [n for n in param_optimizer_2 if 'pooler' not in n[0]]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer_1
            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_encoder_learning_rate},
        {'params': [p for n, p in param_optimizer_1
            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.trigger_encoder_learning_rate},
        {'params': [p for n, p in param_optimizer_2
            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate},
        {'params': [p for n, p in param_optimizer_2
            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate}
        ]
    optimizer = AdamW(params = optimizer_grouped_parameters)
    # Dead locals from the original (fd_criterion, logits placeholder and a
    # never-read global_step counter) removed.
    epoch_index = 0
    while True:
        losses = []
        for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
            sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
            feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
            logits, loss = trigger_classifier(feature, input_masks, labels)
            losses.append(loss.cpu().detach().numpy())
            loss.backward()
            # Gradient accumulation: only step every N micro-batches.
            if (step + 1) % config.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
        print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}")
        epoch_index += 1
        if epoch_index >= 5:
            break
def train_trigger(trigger_encoder, trigger_classifier, tr_data, de_data, seen_train_event, config, new_id, forward_encoder, forward_classifier, forward_event, trigger_tailed, ltlabel, id2label):
    """Incrementally train the trigger model with optional knowledge
    distillation from the previous-stage ("forward") model, attention-map
    distillation, and Gaussian feature augmentation for long-tail events.

    Stops either after ``config.epochs`` epochs (``config.merit == 'epochs'``)
    or by early stopping on dev F1 (``config.merit == 'early_stop'``), in
    which case the best checkpoint is saved and reloaded before returning.
    """
    if config.kd == True and forward_event != None:
        # Indices of the old (already learned) events in the new label space,
        # used to align old- and new-model logits for distillation.
        forward_index = reset_id(forward_event, new_id).cuda()
        print(forward_index)
        T = config.temp
    train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True)
    trigger_encoder.train()
    trigger_classifier.train()
    # BERT-style grouping: skip the pooler; no weight decay for biases/LayerNorm.
    param_optimizer_1 = list(trigger_encoder.named_parameters())
    param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
    param_optimizer_2 = list(trigger_classifier.named_parameters())
    param_optimizer_2 = [n for n in param_optimizer_2 if 'pooler' not in n[0]]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer_1
            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_encoder_learning_rate},
        {'params': [p for n, p in param_optimizer_1
            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.trigger_encoder_learning_rate},
        {'params': [p for n, p in param_optimizer_2
            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate},
        {'params': [p for n, p in param_optimizer_2
            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate}
        ]
    if config.merit == 'epochs':
        num_train_optimization_steps = len(train_data_loader) // config.gradient_accumulation_steps * config.epochs
        optimizer = AdamW(params = optimizer_grouped_parameters,
                weight_decay=config.weight_decay)
    elif config.merit == 'early_stop':
        optimizer = AdamW(params = optimizer_grouped_parameters)
    epoch_index, best_f1, es_index = 0, 0, 0
    #fd_criterion = nn.CosineEmbeddingLoss(reduction = 'sum')
    fd_criterion = nn.CosineEmbeddingLoss()
    logits = None
    global_step = 0
    while(True):
        losses = []
        for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
            sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
            feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
            if len(trigger_tailed) != 0:
                # Long-tail augmentation: for every token whose label is a
                # tail event, add Gaussian noise with the transferred
                # per-dimension std from kt_long_tailed.  Each perturbation
                # is remembered in tail_res so the same noise can be applied
                # to the forward (teacher) features below.
                tail_res = []
                for i, label in enumerate(labels):
                    flabels = label!=0
                    pos_labels = label[flabels]
                    pos_index = torch.nonzero(label)
                    for index, fe in enumerate(pos_labels):
                        if int(fe) in trigger_tailed:
                            protos, standard = trigger_tailed[int(fe)]
                            protos = protos[flabels]
                            standard = standard[flabels]
                            for st in range(len(standard)):
                                s = torch.tensor(np.random.normal(0, standard[st], 1)).cuda()
                                j = pos_index[index]
                                feature[i][j] += s
                                tail_res.append((i,j,s))
            logits, loss = trigger_classifier(feature, input_masks, labels)
            if config.kd == True and forward_event != None:
                #print(tail_res)
                kd_loss = 0
                temp_masks = copy.deepcopy(input_masks)
                forward_features = forward_encoder(sentence_ids, input_ids, temp_masks, segment_ids)
                if len(trigger_tailed) != 0:
                    # Replay the identical noise on the teacher features.
                    for i,j,s in tail_res:
                        forward_features[i][j] += s
                forward_logits = forward_classifier(forward_features, temp_masks, None)
                # Compare student vs. teacher only on the old-event logits,
                # softened by temperature T, over non-padding positions.
                forward_logits = (forward_logits.index_select(2, forward_index)/T).view(-1, len(forward_event))
                new_logits = (logits.index_select(2, forward_index)/T).view(-1, len(forward_event))
                active_loss = (input_masks.view(-1) == 1).cuda()
                forward_logits = forward_logits[active_loss]
                new_logits = new_logits[active_loss]
                if config.select == True:
                    # Optionally restrict distillation to tokens whose gold
                    # label belongs to the old event set, zeroing the
                    # null-class logit first.
                    max_forward_index = max(forward_index)
                    label_index = (labels.view(-1)<=max_forward_index)[active_loss].cuda()
                    forward_logits[:,0] = 0
                    new_logits[:,0] = 0
                    forward_logits = forward_logits[label_index]
                    new_logits = new_logits[label_index]
                # Soft cross-entropy between teacher and student distributions.
                forward_logits = F.softmax(forward_logits, dim = 1)
                new_logits = F.log_softmax(new_logits, dim = 1)
                kd_loss = -torch.mean(torch.sum(forward_logits * new_logits, dim = 1))
                #kd_loss = -torch.sum(torch.sum(forward_logits * new_logits, dim = 1))
                if config.attention == True:
                    # Attention-weighted feature distillation: align the
                    # L2-normalized attention*feature maps of student and
                    # teacher via cosine-embedding loss.
                    attention = trigger_encoder.get_attention(input_ids, input_masks, segment_ids)
                    forward_attention = forward_encoder.get_attention(input_ids, input_masks, segment_ids)
                    attention = attention.matmul(feature)
                    forward_attention = forward_attention.matmul(forward_features)
                    attention = F.normalize(attention, p=2, dim=2).view(-1, attention.shape[2])[active_loss]
                    forward_attention = F.normalize(forward_attention, p=2, dim=2).view(-1, forward_attention.shape[2])[active_loss]
                    fd_loss = fd_criterion(attention, forward_attention, torch.ones(attention.shape[0]).cuda())
                    kd_loss = kd_loss + fd_loss
                # Blend task loss and distillation loss.
                loss = (1-config.alpha)*loss+config.alpha*kd_loss
            losses.append(loss.cpu().detach().numpy())
            loss.backward()
            if (step + 1) % config.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
        if config.merit == 'early_stop':
            res = 0
            res = eval_trigger(trigger_encoder, trigger_classifier, de_data, config, new_id, False, ltlabel, id2label)
            # eval_trigger put the modules in eval() mode; switch back.
            trigger_encoder.train()
            trigger_classifier.train()
            if res > best_f1:
                best_f1 = res
                es_index = 0
                encoder_output_path = config.output_dir+ config.trigger_encoder_file
                torch.save(trigger_encoder.state_dict(), encoder_output_path)
                classifier_output_path = config.output_dir+ config.trigger_classifier_file
                torch.save(trigger_classifier.state_dict(), classifier_output_path)
            else:
                es_index += 1
            print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}")
            epoch_index += 1
            if es_index >= config.early_stop:
                # Restore the best checkpoint before returning.
                trigger_encoder.load_state_dict(torch.load(encoder_output_path))
                trigger_classifier.load_state_dict(torch.load(classifier_output_path))
                break
        if config.merit == 'epochs':
            print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}")
            epoch_index += 1
            if epoch_index >= config.epochs:
                break
def select_data(config, trigger_encoder, relation_dataset, new_id, event):
    """Pick up to ``config.memory_size`` replay exemplars for triggers.

    Samples are encoded with ``trigger_encoder.get_feature``, clustered
    with KMeans, and the sample closest to each centroid is kept.  When
    the dataset already fits in memory, all of it is returned.
    """
    loader = get_ACETriData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
    trigger_encoder.eval()
    encoded = []
    for step, batch in enumerate(loader):
        (sentence_ids, input_ids, input_masks, in_sent,
         segment_ids, labels, ners, sentence) = batch
        sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(
            sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
        with torch.no_grad():
            encoded.append(trigger_encoder.get_feature(
                sentence_ids, input_ids, input_masks, segment_ids).cpu())
    encoded = np.concatenate(encoded)
    n_keep = min(config.memory_size, len(relation_dataset))
    if n_keep == len(relation_dataset):
        # Small enough already — keep everything.
        return list(relation_dataset)
    # Distance of each encoded sample to each of the n_keep centroids.
    centroid_dist = KMeans(n_clusters = n_keep, random_state = 0).fit_transform(encoded)
    return [relation_dataset[np.argmin(centroid_dist[:, c])] for c in range(n_keep)]
def addPseudoLabel(trigger_encoder, trigger_classifier, data, config, id2label):
    """Augment ``data`` with high-confidence pseudo-labelled samples.

    For every token the current model predicts as a trigger with score
    > 0.8 at an offset that carries no gold trigger, a copy of the sample
    is created whose label vector marks only that offset.  Returns the
    pseudo samples prepended to the original ``data``.

    NOTE(review): ``pseudo_label[offset] = id2label[int(trigger)]`` writes an
    ``id2label`` value into a float tensor — this assumes ``id2label`` maps
    predicted indices back to numeric label ids here; confirm against the
    sampler that builds ``id2label``.
    """
    pseudo_data = []
    eval_data_loader = get_ACETriData_loader(data, config, shuffle = True, batch_size = 1)
    trigger_encoder.eval()
    trigger_classifier.eval()
    for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
        sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, None, config.device)
        with torch.no_grad():
            feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
            logits = trigger_classifier(feature, None, None)
        new_logits = logits
        for index, value in enumerate(in_sent):
            pred_first = True
            # Positions belonging to the real sentence (non-padding).
            value = value == 1
            gold_offset = torch.nonzero(labels[index][value]).squeeze(dim = 1)
            gold_label = torch.gather(labels[index][value], dim = 0, index = gold_offset)
            gold_label = [int(val) for val in gold_label]
            gold_offset = [int(val) for val in gold_offset]
            res = new_logits[index][value,:]
            max_value, pred_tri_each_word = torch.max(res, 1)
            pred_trigger = 0
            for offset, trigger in enumerate(pred_tri_each_word):
                # Keep only confident (>0.8) non-null predictions at offsets
                # that have no gold trigger.
                if trigger!=0 and max_value[offset] > 0.8 and offset not in gold_offset:
                    one_sample = {}
                    one_sample['sentence_ids'] = sentence_ids[0].tolist()
                    one_sample['input_ids'] = input_ids[0].tolist()
                    one_sample['input_masks'] = input_masks[0].tolist()
                    # Label vector with ONLY the pseudo-labelled offset set.
                    pseudo_label = torch.zeros(len(input_ids[0]))
                    pseudo_label[offset] = id2label[int(trigger)]
                    one_sample['labels'] = pseudo_label.tolist()
                    one_sample['in_sent'] = in_sent[0].tolist()
                    one_sample['segment_ids'] = segment_ids[0].tolist()
                    one_sample['ners'] = ners[0].tolist()
                    one_sample['sentence'] = sentence[0]
                    pseudo_data.append(one_sample)
    return pseudo_data + data
def get_trigger_proto(config, trigger_encoder, relation_dataset, new_id, event):
    """Compute the prototype (mean feature) and per-dimension std for one
    event type over every token labelled with ``event`` in the dataset.

    Returns
    -------
    (proto, standard) : tuple of CPU tensors; ``proto`` has shape (1, dim).
    """
    loader = get_ACETriData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
    trigger_encoder.eval()
    event_feats = []
    for step, batch in enumerate(loader):
        (sentence_ids, input_ids, input_masks, in_sent,
         segment_ids, labels, ners, sentence) = batch
        sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(
            sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
        with torch.no_grad():
            encoded = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids)
            # Keep only the token features whose gold label is this event.
            encoded = encoded[labels == event]
        event_feats.append(encoded)
    event_feats = torch.cat(event_feats, dim = 0)
    proto = torch.mean(event_feats, dim = 0, keepdim = True).cpu()
    standard = torch.sqrt(torch.var(event_feats, dim=0)).cpu()
    return proto, standard
def kt_long_tailed(trigger_protos, trigger_num):
    """Transfer prototype statistics from head events to long-tail events.

    The first 80% of ``trigger_num`` entries are treated as tail events.
    For each tail event, a synthetic ``(prototype, std)`` pair is built as
    a softmax-weighted mixture over every *other* event's statistics,
    weighted by cosine similarity between the tail prototype and each
    other event's prototype.

    Parameters
    ----------
    trigger_protos : dict mapping event id -> (proto, std), each of shape (1, dim).
    trigger_num : sequence of (event_id, count) pairs.

    Returns
    -------
    dict mapping each tail event id -> (mixed_proto, mixed_std), shape (dim,).
    """
    tail_count = int(0.8 * len(trigger_num))
    transferred = {}
    for idx in range(tail_count):
        event_id = trigger_num[idx][0]
        proto_t, std_t = trigger_protos[event_id]
        proto_t = proto_t.squeeze(0)
        std_t = std_t.squeeze(0)
        sims, other_protos, other_stds = [], [], []
        for other_id, (proto_o, std_o) in trigger_protos.items():
            if other_id == event_id:
                continue
            proto_o = proto_o.squeeze(0)
            std_o = std_o.squeeze(0)
            sims.append(F.cosine_similarity(proto_t, proto_o, dim = 0))
            other_protos.append(proto_o)
            other_stds.append(std_o)
        # Similarity-weighted mixture over all other events.
        weights = F.softmax(torch.stack(sims), dim = 0)
        mixed_proto = torch.matmul(weights, torch.stack(other_protos))
        mixed_std = torch.matmul(weights, torch.stack(other_stds))
        transferred[event_id] = (mixed_proto, mixed_std)
    return transferred
def eval_entity_detection(entity_detection, eval_data, config, new_id):
    """Evaluate the entity (NER span) detector and return micro F1.

    Gold and predicted per-token tag sequences are converted into
    (start, end) spans by merging maximal runs of the same non-zero tag;
    a predicted span counts as correct if the identical span exists in
    the gold set.  Returns 0 when any of the counts is zero.
    """
    eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
    entity_detection.eval()
    pred_num = 0
    correct_num = 0
    label_num = 0
    pred_res = []
    for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
        sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
        with torch.no_grad():
            logits = entity_detection.get_res(input_ids, segment_ids, input_masks)
        new_logits = logits
        for index, value in enumerate(in_sent):
            # Positions that belong to the real sentence (non-padding).
            value = value == 1
            # Drop [CLS]/[SEP] predictions at the two ends.
            pred_logits = torch.tensor(new_logits[index])[1:-1].tolist()
            # --- gold spans: merge runs of identical non-zero tags ---
            gold_offset = []
            start, end, now = 0,0,0
            for offset, wo in enumerate(ners[index][value]):
                wo = int(wo)
                if wo !=0 and now == 0:
                    # entering a new span
                    now = wo
                    start = offset
                    end = offset+1
                elif wo !=0 and now !=0 and wo == now:
                    # same tag continues — extend the span
                    end = offset+1
                elif wo !=0 and now !=0 and wo != now:
                    # tag changed — close the old span, open a new one
                    now = wo
                    gold_offset.append((start, end))
                    start = offset
                    end = offset+1
                elif wo == 0 and now == 0:
                    start, end = 0, 0
                elif wo == 0 and now != 0:
                    # span ended
                    now = 0
                    gold_offset.append((start, end))
            if now != 0:
                gold_offset.append((start, end))
            # Sanity check: every gold span must carry a single tag.
            for i in gold_offset:
                start, end = i
                for j in range(start, end-1):
                    if ners[index][value][j] != ners[index][value][j+1]:
                        print(ners[index][value])
                        print(gold_offset)
                        assert(0)
            label_num+=len(gold_offset)
            # --- predicted spans: same merge state machine ---
            pred_offset = []
            start, end, now = 0,0,0
            pred_tri_each_word = pred_logits
            for offset, wo in enumerate(pred_tri_each_word):
                wo = int(wo)
                if wo !=0 and now == 0:
                    now = wo
                    start = offset
                    end = offset+1
                elif wo !=0 and now !=0 and wo == now:
                    end = offset+1
                elif wo !=0 and now !=0 and wo != now:
                    now = wo
                    pred_offset.append((start, end))
                    start = offset
                    end = offset+1
                elif wo == 0 and now == 0:
                    start, end = 0, 0
                elif wo == 0 and now != 0:
                    now = 0
                    pred_offset.append((start, end))
            if now != 0:
                pred_offset.append((start, end))
            pred_num += len(pred_offset)
            for pred in pred_offset:
                if pred in gold_offset:
                    correct_num += 1
    if pred_num == 0 or label_num == 0 or correct_num == 0:
        return 0
    pred_c = 100.0*correct_num/pred_num
    recall_c = 100.0*correct_num/label_num
    f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
    return f1_c
def pred_entity_detection(config, entity_detection, sampler):
    """Run the entity detector on the trigger-prediction file and dump spans.

    Reads the sentences produced by ``eval_trigger`` (``config.trigger_pred_file``),
    predicts entity spans for each, and writes records with ``sentence``,
    ``trigger``, ``s_start`` and the predicted ``ner`` span list to
    ``config.entity_pred_file`` for the argument-prediction stage.
    """
    eval_data = sampler.read_pred_sample(config.trigger_pred_file)
    eval_data_loader = get_ACEPredData_loader(eval_data, config, shuffle = True)
    entity_detection.eval()
    pred_num = 0
    correct_num = 0
    label_num = 0
    pred_res = []
    for step, (input_ids, input_masks, in_sent, segment_ids, sentence, event) in enumerate(eval_data_loader):
        # Re-stack the per-sample tensors into one CUDA batch tensor.
        input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
        input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
        segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
        with torch.no_grad():
            logits = entity_detection.get_res(input_ids, segment_ids, input_masks)
        new_logits = logits
        for index, value in enumerate(in_sent):
            value = value == 1
            # Drop [CLS]/[SEP] predictions at the two ends.
            pred_logits = torch.tensor(new_logits[index])[1:-1].tolist()
            # Merge maximal runs of identical non-zero tags into spans.
            pred_offset = []
            start, end, now = 0,0,0
            pred_tri_each_word = pred_logits
            for offset, wo in enumerate(pred_tri_each_word):
                wo = int(wo)
                if wo !=0 and now == 0:
                    now = wo
                    start = offset
                    end = offset+1
                elif wo !=0 and now !=0 and wo == now:
                    end = offset+1
                elif wo !=0 and now !=0 and wo != now:
                    now = wo
                    pred_offset.append((start, end))
                    start = offset
                    end = offset+1
                elif wo == 0 and now == 0:
                    start, end = 0, 0
                elif wo == 0 and now != 0:
                    now = 0
                    pred_offset.append((start, end))
            if now != 0:
                pred_offset.append((start, end))
            onesamp = {}
            onesamp['sentence'] = sentence[index]
            onesamp['trigger'] = event[index]
            onesamp['s_start'] = 0
            onesamp['ner'] = pred_offset
            pred_res.append(onesamp)
    f = open(config.entity_pred_file, 'w')
    json.dump(pred_res, f)
    f.close()
    print('Entity predict over')
def train_entity_detection(entity_detection, tr_data, de_data, config, new_id):
    """Train the entity (NER span) detector with early stopping on dev F1.

    After every epoch the model is evaluated with
    :func:`eval_entity_detection` on ``de_data``; the best checkpoint is
    saved to ``config.output_dir + config.entity_file`` and reloaded into
    ``entity_detection`` before the function returns.

    Fixes vs. the original: the checkpoint path is computed once up front,
    so the early-stop reload can no longer hit an undefined name when the
    first epochs never improve; dead locals (``fd_criterion``, ``logits``,
    ``global_step``) removed.
    """
    train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True)
    entity_detection.train()
    # Skip the unused BERT pooler; exempt biases/LayerNorm from weight decay.
    param_optimizer_1 = list(entity_detection.named_parameters())
    param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer_1
            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.entity_detection_leraning_rate},
        {'params': [p for n, p in param_optimizer_1
            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.entity_detection_leraning_rate}
        ]
    optimizer = AdamW(params = optimizer_grouped_parameters)
    epoch_index, best_f1, es_index = 0, 0, 0
    # Fixed checkpoint target (defined before the loop on purpose).
    encoder_output_path = config.output_dir + config.entity_file
    while True:
        losses = []
        for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader):
            sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device)
            loss = entity_detection(input_ids, ners, segment_ids, input_masks)
            losses.append(loss.cpu().detach().numpy())
            loss.backward()
            # Gradient accumulation: only step every N micro-batches.
            if (step + 1) % config.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
        res = eval_entity_detection(entity_detection, de_data, config, new_id)
        entity_detection.train()  # eval_entity_detection switched it to eval()
        if res > best_f1:
            best_f1 = res
            es_index = 0
            torch.save(entity_detection.state_dict(), encoder_output_path)
        else:
            es_index += 1
        print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}")
        epoch_index += 1
        if es_index >= config.early_stop:
            # Restore the best checkpoint before returning.
            entity_detection.load_state_dict(torch.load(encoder_output_path))
            break
def train_argument_detection(argument_detection, tr_data, de_data, config, metadata, unseen_metadata):
    """Train the argument-role classifier with early stopping on dev F1.

    ``metadata`` maps each event type to its allowed role ids and
    ``unseen_metadata`` is forwarded to the model's loss alongside it.
    The best checkpoint (by :func:`eval_argument_detection` on ``de_data``)
    is kept at ``config.output_dir + config.argument_file`` and reloaded
    before returning.

    Fixes vs. the original: checkpoint path computed once up front (the
    early-stop reload could previously reference an undefined name if F1
    never improved); dead locals (``fd_criterion``, ``logits``,
    ``global_step``) removed.
    """
    train_data_loader = get_ACEArgData_loader(tr_data, config, shuffle = True)
    argument_detection.train()
    # Skip the unused BERT pooler; exempt biases/LayerNorm from weight decay.
    param_optimizer_1 = list(argument_detection.named_parameters())
    param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer_1
            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.argument_detection_leraning_rate},
        {'params': [p for n, p in param_optimizer_1
            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.argument_detection_leraning_rate}
        ]
    optimizer = AdamW(params = optimizer_grouped_parameters)
    epoch_index, best_f1, es_index = 0, 0, 0
    # Fixed checkpoint target (defined before the loop on purpose).
    encoder_output_path = config.output_dir + config.argument_file
    while True:
        losses = []
        for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader):
            # Re-stack per-sample tensors into single CUDA batch tensors.
            input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
            input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
            segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
            args = torch.tensor(np.array([item.cpu().detach().numpy() for item in args])).cuda()
            loss = argument_detection(input_ids, args, segment_ids, input_masks, args_offset, metadata, unseen_metadata, trigger, ner, gold_args)
            losses.append(loss.cpu().detach().numpy())
            loss.backward()
            # Gradient accumulation: only step every N micro-batches.
            if (step + 1) % config.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
        res = eval_argument_detection(argument_detection, de_data, config, metadata)
        argument_detection.train()  # eval switched the model to eval()
        if res > best_f1:
            best_f1 = res
            es_index = 0
            torch.save(argument_detection.state_dict(), encoder_output_path)
        else:
            es_index += 1
        print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}")
        epoch_index += 1
        if es_index >= config.early_stop:
            # Restore the best checkpoint before returning.
            argument_detection.load_state_dict(torch.load(encoder_output_path))
            break
def eval_argument_detection(argument_detection, eval_data, config, metadata):
eval_data_loader = get_ACEArgData_loader(eval_data, config, shuffle = True)
argument_detection.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner)
for i in range(len(in_sent)):
new_logits = logits[i]
seen_args = copy.deepcopy(metadata[trigger[i]])
seen_args += [0]
pred_roles = []
if new_logits == None:
continue
for index, value in enumerate(new_logits):
logi = value[seen_args]
max_value, pred_role = torch.max(logi, dim = 0)
start, end = ner[i][index]
one_pred = (start, end, seen_args[int(pred_role)])
if seen_args[int(pred_role)] != 0:
pred_roles.append(one_pred)
one_gold_args = copy.deepcopy(gold_args[i])
pred_num += len(pred_roles)
label_num += len(one_gold_args)
for preds in pred_roles:
if preds in one_gold_args:
correct_num += 1
one_gold_args.remove(preds)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
def pred_argument_detection(config, argument_detection, sampler, metadata, gold_data):
eval_data = sampler.read_pred_ner_sample(config.entity_pred_file)
eval_data_loader = get_ACEPredNerData_loader(eval_data, config, shuffle = True)
argument_detection.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
gold_args = {}
gold_data_loader = get_ACEArgData_loader(gold_data, config, shuffle = True, batch_size = 1)
for step, (sentence, _, _, _, _, args, args_offset, gold, _, trig) in enumerate(gold_data_loader):
sentence = copy.deepcopy(sentence[0])
trig = copy.deepcopy(trig[0])
gold = copy.deepcopy(gold[0])
sentence = ''.join(sentence) + str(trig)
if sentence in gold_args:
print(gold_args[sentence])
print(gold)
assert(0)
gold_args[sentence] = gold
label_num += len(gold)
for step, (input_ids, input_masks, in_sent, segment_ids, sentence, trigger, ner) in enumerate(eval_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner)
for i in range(len(in_sent)):
sent = copy.deepcopy(sentence[i])
tr = copy.deepcopy(trigger[i])
tr = sampler.index2vocab[tr]
sent = ''.join(sent) + str(tr)
new_logits = logits[i]
seen_args = copy.deepcopy(metadata[tr])
seen_args += [0]
pred_roles = []
if new_logits == None:
continue
for index, value in enumerate(new_logits):
logi = value[seen_args]
max_value, pred_role = torch.max(logi, dim = 0)
start, end = ner[i][index]
one_pred = (start, end, seen_args[int(pred_role)])
if seen_args[int(pred_role)] != 0:
pred_roles.append(one_pred)
if sent in gold_args:
one_gold_args = copy.deepcopy(gold_args[sent])
pred_num += len(pred_roles)
for preds in pred_roles:
if preds in one_gold_args:
while(preds in one_gold_args):
correct_num += 1
one_gold_args.remove(preds)
else:
pred_num += len(pred_roles)
if pred_num == 0 or label_num == 0 or correct_num == 0:
return 0
pred_c = 100.0*correct_num/pred_num
recall_c = 100.0*correct_num/label_num
f1_c = 2*pred_c*recall_c/(pred_c+recall_c)
return f1_c
def select_argu_data(config, argument_detection, relation_dataset,new_id, event_mention):
train_data_loader = get_ACEArgData_loader(relation_dataset, config, shuffle = False, batch_size = 1)
features = []
argument_detection.eval()
for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader):
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda()
input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda()
segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda()
with torch.no_grad():
feature = argument_detection.get_feature(input_ids, segment_ids, input_masks).cpu()
features.append(feature)
features = np.concatenate(features)
num_clusters = min(config.memory_size, len(relation_dataset))
if num_clusters == len(relation_dataset):
memory = []
for i in relation_dataset:
memory.append(i)
return memory
distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features)
memory = []
for k in range(num_clusters):
select_index = np.argmin(distances[:, k])
ins = relation_dataset[select_index]
memory.append(ins)
return memory
def main():
# load config
parser = ArgumentParser()
parser.add_argument('--config', default='./config/ace.ini')
args = parser.parse_args()
config = Config(args.config)
# set train param
config.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps)
triger_result_total, trigger_result_cur, argument_result_total, argument_result_cur = [], [], [], []
# six truns and get average
for i in range(config.total_round):
print(f"Now is round {i}")
config.seed += 100
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
# now is trigger detection task
sampler = ACETriDataloder(config, i)
trigger_one_round_res = []
argument_one_round_res = []
# trigger memory space
trigger_memorized_samples = {}
# argument memory space
argument_memorized_samples = {}
# init trigger encode model
entity_detection = entityDetection(config).to(config.device) | argument_detection = argumentDetection(config).to(config.device) | 7 | 2023-10-17 02:40:04+00:00 | 12k |
IBM/VillanDiffusion | operate.py | [
{
"identifier": "fid",
"path": "fid_score.py",
"snippet": "def fid(path: List[str], batch_size: int=50, dims: int=2048, device: str=None, num_workers: int=None):\n if device is None:\n device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')\n else:\n device = torch.de... | from functools import partial
from typing import List, Set, Tuple, Union
from diffusers import DiffusionPipeline, StableDiffusionPipeline, AutoencoderKL, UNet2DConditionModel, DPMSolverMultistepScheduler
from torchmetrics import StructuralSimilarityIndexMeasure
from torch import nn
from PIL import Image
from tqdm import tqdm
from accelerate import Accelerator
from fid_score import fid
from dataset import CaptionBackdoor, Backdoor, DatasetLoader, ImagePathDataset, ReplicateDataset
from config import SamplingStatic, MeasuringStatic, PromptDatasetStatic, DEFAULT_PROMPTS_POKEMON, DEFAULT_PROMPTS_CELEBA, ModelSchedStatic
from tools import batchify, batchify_generator, randn_images, encode_latents, save_grid, match_count
from tools import Log
import glob
import json
import os
import random
import pickle
import gc
import torch
import numpy as np | 8,195 |
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n)
if inits is not None:
if len(prompts) != len(inits):
raise ValueError()
init_batchs = torch.split(inits.type(tensor_dtype), max_batch_n)
else:
init_batchs = [None] * len(prompt_batchs)
# print(f"Prompt Batchs: {prompt_batchs}")
# print(f"Init Batchs: {len(init_batchs)}")
all_imgs = []
cnt: int = 0
# print(f"prompt_batch: {len(prompt_batchs)}, init_batch: {len(init_batchs)}")
for prompt_batch, init_batch in zip(prompt_batchs, init_batchs):
# print(f"prompt_batch: {prompt_batch}")
print(f"prompt_batch Size: {len(prompt_batch)}, init_batchs: {init_batch}")
if init_batch is not None:
init_batch = init_batch.to(device=device)
batch_imgs = pipeline_call(prompt=prompt_batch, latents=init_batch).images
handle_batch_fn(cnt, batch_imgs, prompt_batch, init_batch)
cnt += len(batch_imgs)
if return_imgs:
all_imgs += [batch_imgs]
del prompt_batch
del batch_imgs
if init_batch is not None:
del init_batch
torch.cuda.empty_cache()
gc.collect()
del pipeline
torch.cuda.empty_cache()
gc.collect()
if return_imgs:
return np.concatenate(all_imgs)
else:
return None
@staticmethod
def _sample(prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN,
handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
if len(prompts) < SamplingStatic.SHOW_PROMPT_N:
Log.info(f"Prompts: {prompts}")
else:
Log.info(f"Prompts: {prompts[:SamplingStatic.SHOW_PROMPT_N]}")
# print(f"inits: {inits.shape}")
images = Sampling._batch_sampling(prompts=prompts, inits=inits, pipeline=pipe,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale, max_batch_n=max_batch_n,
seed=seed, handle_batch_fn=handle_batch_fn,
return_imgs=return_imgs)
handle_fn(images, prompts, inits)
if return_imgs:
return images
return None
def sample(self, prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED,
handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
return Sampling._sample(prompts=prompts, inits=inits, pipe=pipe, num_inference_steps=self.__num_inference_steps,
guidance_scale=self.__guidance_scale, max_batch_n=self.__max_batch_n, seed=seed,
handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs)
def image_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED,
handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
if inits is None:
channel, image_size = 3, pipe.unet.sample_size
noise: torch.Tensor = randn_images(n=len(prompts), channel=channel, image_size=image_size, seed=seed)
if hasattr(pipe, 'vae'):
inits: torch.Tensor = encode_latents(pipe.vae, noise + self.__image_backdoor.get_trigger(type=trigger, channel=channel, image_size=image_size), weight_dtype=torch.float16)
else:
inits: torch.Tensor = noise + trigger
return self.sample(prompts=prompts, pipe=pipe, inits=inits, seed=seed, handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs)
def caption_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, start_pos: int=SamplingStatic.TRIG_START_POS,
end_pos: int=SamplingStatic.TRIG_END_POS, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED,
handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
# def normalize_pos(pos: int, txt_len: int):
# if pos > txt_len:
# pos = txt_len
# elif pos + txt_len < 0:
# pos = 0
# return pos
# def insert_trigger(txt: str):
# txt_ls_len = len(txt.split(" "))
# pos_idxs = [i for i in range(txt_ls_len + 1)]
# pos_idxs = pos_idxs[normalize_pos(pos=start_pos, txt_len=txt_ls_len):normalize_pos(pos=end_pos, txt_len=txt_ls_len)]
# txt_ls = txt.split(" ")
# insert_pos = random.choice(pos_idxs)
# txt_ls.insert(insert_pos, trigger)
# return ' '.join(txt_ls)
# prompts: List[str] = [insert_trigger(txt=prompt) for prompt in prompts]
| """
Some commly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n)
if inits is not None:
if len(prompts) != len(inits):
raise ValueError()
init_batchs = torch.split(inits.type(tensor_dtype), max_batch_n)
else:
init_batchs = [None] * len(prompt_batchs)
# print(f"Prompt Batchs: {prompt_batchs}")
# print(f"Init Batchs: {len(init_batchs)}")
all_imgs = []
cnt: int = 0
# print(f"prompt_batch: {len(prompt_batchs)}, init_batch: {len(init_batchs)}")
for prompt_batch, init_batch in zip(prompt_batchs, init_batchs):
# print(f"prompt_batch: {prompt_batch}")
print(f"prompt_batch Size: {len(prompt_batch)}, init_batchs: {init_batch}")
if init_batch is not None:
init_batch = init_batch.to(device=device)
batch_imgs = pipeline_call(prompt=prompt_batch, latents=init_batch).images
handle_batch_fn(cnt, batch_imgs, prompt_batch, init_batch)
cnt += len(batch_imgs)
if return_imgs:
all_imgs += [batch_imgs]
del prompt_batch
del batch_imgs
if init_batch is not None:
del init_batch
torch.cuda.empty_cache()
gc.collect()
del pipeline
torch.cuda.empty_cache()
gc.collect()
if return_imgs:
return np.concatenate(all_imgs)
else:
return None
@staticmethod
def _sample(prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN,
handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
if len(prompts) < SamplingStatic.SHOW_PROMPT_N:
Log.info(f"Prompts: {prompts}")
else:
Log.info(f"Prompts: {prompts[:SamplingStatic.SHOW_PROMPT_N]}")
# print(f"inits: {inits.shape}")
images = Sampling._batch_sampling(prompts=prompts, inits=inits, pipeline=pipe,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale, max_batch_n=max_batch_n,
seed=seed, handle_batch_fn=handle_batch_fn,
return_imgs=return_imgs)
handle_fn(images, prompts, inits)
if return_imgs:
return images
return None
def sample(self, prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED,
handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
return Sampling._sample(prompts=prompts, inits=inits, pipe=pipe, num_inference_steps=self.__num_inference_steps,
guidance_scale=self.__guidance_scale, max_batch_n=self.__max_batch_n, seed=seed,
handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs)
def image_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED,
handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
if inits is None:
channel, image_size = 3, pipe.unet.sample_size
noise: torch.Tensor = randn_images(n=len(prompts), channel=channel, image_size=image_size, seed=seed)
if hasattr(pipe, 'vae'):
inits: torch.Tensor = encode_latents(pipe.vae, noise + self.__image_backdoor.get_trigger(type=trigger, channel=channel, image_size=image_size), weight_dtype=torch.float16)
else:
inits: torch.Tensor = noise + trigger
return self.sample(prompts=prompts, pipe=pipe, inits=inits, seed=seed, handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs)
def caption_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, start_pos: int=SamplingStatic.TRIG_START_POS,
end_pos: int=SamplingStatic.TRIG_END_POS, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED,
handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False):
# def normalize_pos(pos: int, txt_len: int):
# if pos > txt_len:
# pos = txt_len
# elif pos + txt_len < 0:
# pos = 0
# return pos
# def insert_trigger(txt: str):
# txt_ls_len = len(txt.split(" "))
# pos_idxs = [i for i in range(txt_ls_len + 1)]
# pos_idxs = pos_idxs[normalize_pos(pos=start_pos, txt_len=txt_ls_len):normalize_pos(pos=end_pos, txt_len=txt_ls_len)]
# txt_ls = txt.split(" ")
# insert_pos = random.choice(pos_idxs)
# txt_ls.insert(insert_pos, trigger)
# return ' '.join(txt_ls)
# prompts: List[str] = [insert_trigger(txt=prompt) for prompt in prompts] | prompts: List[str] = CaptionBackdoor.backdoor_caption_generator(_type=trigger, start_pos=start_pos, end_pos=end_pos)(prompts) | 1 | 2023-10-17 19:57:37+00:00 | 12k |
nchen909/Pass-Tuning | models_list/bitfit/modeling_auto.py | [
{
"identifier": "PLBartForConditionalGeneration",
"path": "models_list/bitfit/modeling_plbart.py",
"snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.ver... | import warnings
from collections import OrderedDict
from transformers.utils import logging
from transformers.models.albert.modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from .modeling_plbart import (
PLBartForConditionalGeneration,
PLBartModel,
)
from transformers.models.bart.modeling_bart import (
BartForCausalLM,
BartForQuestionAnswering,
BartForSequenceClassification,
)
from transformers.models.bert.modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from transformers.models.big_bird.modeling_big_bird import (
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdModel,
)
from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
)
from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
)
from transformers.models.camembert.modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from transformers.models.canine.modeling_canine import (
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineModel,
)
from transformers.models.clip.modeling_clip import CLIPModel
from transformers.models.convbert.modeling_convbert import (
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertModel,
)
from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel
from transformers.models.deberta.modeling_deberta import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta_v2.modeling_deberta_v2 import (
DebertaV2ForMaskedLM,
DebertaV2ForQuestionAnswering,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2Model,
)
from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel
from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel
from transformers.models.distilbert.modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder
from transformers.models.electra.modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel
from transformers.models.flaubert.modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from transformers.models.funnel.modeling_funnel import (
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel
from transformers.models.hubert.modeling_hubert import HubertModel
from transformers.models.ibert.modeling_ibert import (
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
)
from transformers.models.layoutlm.modeling_layoutlm import (
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
)
from transformers.models.led.modeling_led import (
LEDForConditionalGeneration,
LEDForQuestionAnswering,
LEDForSequenceClassification,
LEDModel,
)
from transformers.models.longformer.modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from transformers.models.luke.modeling_luke import LukeModel
from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel
from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model
from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel
from transformers.models.mbart.modeling_mbart import (
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
)
from transformers.models.megatron_bert.modeling_megatron_bert import (
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
from transformers.models.mobilebert.modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from transformers.models.mpnet.modeling_mpnet import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model
from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel
from transformers.models.pegasus.modeling_pegasus import PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel
from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel
from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from transformers.models.reformer.modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerModel,
ReformerModelWithLMHead,
)
from transformers.models.retribert.modeling_retribert import RetriBertModel
from transformers.models.roberta.modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from transformers.models.roformer.modeling_roformer import (
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerModel,
)
from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel
from transformers.models.squeezebert.modeling_squeezebert import (
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
from .modeling_t5 import T5ForConditionalGeneration, T5Model
from transformers.models.tapas.modeling_tapas import (
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
)
from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel
from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel
from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model
from transformers.models.xlm.modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import (
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from transformers.models.xlm_roberta.modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from transformers.models.xlnet.modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update
from transformers.models.auto.configuration_auto import (
AlbertConfig,
PLBartConfig,
BertConfig,
BertGenerationConfig,
BigBirdConfig,
BigBirdPegasusConfig,
BlenderbotConfig,
BlenderbotSmallConfig,
CamembertConfig,
CanineConfig,
CLIPConfig,
ConvBertConfig,
CTRLConfig,
DebertaConfig,
DebertaV2Config,
DeiTConfig,
DetrConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
GPTNeoConfig,
HubertConfig,
IBertConfig,
LayoutLMConfig,
LEDConfig,
LongformerConfig,
LukeConfig,
LxmertConfig,
M2M100Config,
MarianConfig,
MBartConfig,
MegatronBertConfig,
MobileBertConfig,
MPNetConfig,
MT5Config,
OpenAIGPTConfig,
PegasusConfig,
ProphetNetConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
RoFormerConfig,
Speech2TextConfig,
SqueezeBertConfig,
T5Config,
TapasConfig,
TransfoXLConfig,
VisualBertConfig,
ViTConfig,
Wav2Vec2Config,
XLMConfig,
XLMProphetNetConfig,
XLMRobertaConfig,
XLNetConfig,
)
| 10,778 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
# Add modeling imports here
# # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version.
# Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version.
logger = logging.get_logger(__name__)
MODEL_MAPPING = OrderedDict(
[
# Base model mapping
(VisualBertConfig, VisualBertModel),
(CanineConfig, CanineModel),
(RoFormerConfig, RoFormerModel),
(CLIPConfig, CLIPModel),
(BigBirdPegasusConfig, BigBirdPegasusModel),
(DeiTConfig, DeiTModel),
(LukeConfig, LukeModel),
(DetrConfig, DetrModel),
(GPTNeoConfig, GPTNeoModel),
(BigBirdConfig, BigBirdModel),
(Speech2TextConfig, Speech2TextModel),
(ViTConfig, ViTModel),
(Wav2Vec2Config, Wav2Vec2Model),
(HubertConfig, HubertModel),
(M2M100Config, M2M100Model),
(ConvBertConfig, ConvBertModel),
(LEDConfig, LEDModel),
(BlenderbotSmallConfig, BlenderbotSmallModel),
(RetriBertConfig, RetriBertModel),
(MT5Config, MT5Model),
| # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
# Add modeling imports here
# # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version.
# Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version.
logger = logging.get_logger(__name__)
MODEL_MAPPING = OrderedDict(
[
# Base model mapping
(VisualBertConfig, VisualBertModel),
(CanineConfig, CanineModel),
(RoFormerConfig, RoFormerModel),
(CLIPConfig, CLIPModel),
(BigBirdPegasusConfig, BigBirdPegasusModel),
(DeiTConfig, DeiTModel),
(LukeConfig, LukeModel),
(DetrConfig, DetrModel),
(GPTNeoConfig, GPTNeoModel),
(BigBirdConfig, BigBirdModel),
(Speech2TextConfig, Speech2TextModel),
(ViTConfig, ViTModel),
(Wav2Vec2Config, Wav2Vec2Model),
(HubertConfig, HubertModel),
(M2M100Config, M2M100Model),
(ConvBertConfig, ConvBertModel),
(LEDConfig, LEDModel),
(BlenderbotSmallConfig, BlenderbotSmallModel),
(RetriBertConfig, RetriBertModel),
(MT5Config, MT5Model),
| (T5Config, T5Model),
| 3 | 2023-10-20 09:24:44+00:00 | 12k |
JoaoPedro9674/django-ledger | django_ledger/tests/base.py | [
{
"identifier": "EntityDataGenerator",
"path": "django_ledger/io/data_generator.py",
"snippet": "class EntityDataGenerator(LoggingMixIn):\n\n def __init__(self,\n user_model,\n entity_model: Union[EntityModel, str],\n start_date: date,\n ... | from datetime import date, timedelta
from decimal import Decimal
from itertools import cycle
from logging import getLogger, DEBUG
from random import randint, choice
from typing import Optional
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from django.test.client import Client
from django.utils.timezone import get_default_timezone
from django_ledger.io.data_generator import EntityDataGenerator
from django_ledger.models.entity import EntityModel, EntityModelQuerySet | 7,817 |
UserModel = get_user_model()
class DjangoLedgerBaseTest(TestCase):
FY_STARTS = None
CAPITAL_CONTRIBUTION = None
START_DATE = None
DAYS_FORWARD = 9 * 30
TX_QUANTITY = 50
user_model = None
TEST_DATA = list()
CLIENT = None
TZ = None
N = None
USER_EMAIL = None
PASSWORD = None
USERNAME = None
logger = None
accrual_cycle = cycle([True, False])
    @classmethod
    def setUpTestData(cls):
        """Create the class-level fixtures shared by every test: a logger, a
        reusable test user, random entity payloads and populated EntityModel rows."""
        cls.logger = getLogger(__name__)
        cls.logger.setLevel(level=DEBUG)
        # Credentials for the throw-away test user.
        cls.USERNAME: str = 'testuser'
        cls.PASSWORD: str = 'NeverUseThisPassword12345'
        cls.USER_EMAIL: str = 'testuser@djangoledger.com'
        cls.N: int = 2
        # NOTE(review): DAYS_FWD is set here while the class attribute is named
        # DAYS_FORWARD -- confirm which one downstream code actually reads.
        cls.DAYS_FWD: int = randint(180, 180 * 3)
        cls.TZ = get_default_timezone()
        cls.START_DATE = cls.get_random_date()
        cls.CLIENT = Client(enforce_csrf_checks=False)
        # Reuse the user if an earlier test class already created it.
        try:
            cls.user_model = UserModel.objects.get(username=cls.USERNAME)
        except ObjectDoesNotExist:
            cls.user_model = UserModel.objects.create_user(
                username=cls.USERNAME,
                password=cls.PASSWORD,
                email=cls.USER_EMAIL,
            )
        # Fiscal-year start months are stored as strings '1'..'12'.
        cls.FY_STARTS = list(str(i) for i in range(1, 13))
        cls.TEST_DATA = list()
        cls.CAPITAL_CONTRIBUTION = Decimal('50000.00')
        cls.ENTITY_MODEL_QUERYSET: Optional[EntityModelQuerySet] = None
        # Create EntityModel rows, then fill them with generated ledger data.
        cls.create_entity_models(n=cls.N)
        cls.populate_entity_models()
@classmethod
def get_random_date(cls) -> date:
return date(
year=choice(range(1990, 2020)),
month=choice(range(1, 13)),
day=choice(range(1, 28))
)
    @classmethod
    def login_client(cls):
        """Authenticate the shared Django test client as the fixture user."""
        # cls.logger.info('Logging in client...')
        cls.CLIENT.login(
            username=cls.USERNAME,
            password=cls.PASSWORD
        )
    @classmethod
    def logout_client(cls):
        """Drop the shared test client's session."""
        # cls.logger.info('Logging out client...')
        cls.CLIENT.logout()
@classmethod
def refresh_test_data(cls, n: int = None):
N = n if n else cls.N
cls.TEST_DATA = [cls.get_random_entity_data() for _ in range(N)]
@classmethod
def get_random_entity_data(cls) -> dict:
return {
'slug': f'a-cool-slug-{randint(10000, 99999)}',
'name': f'Testing Inc-{randint(100000, 999999)}',
'address_1': f'{randint(100000, 999999)} Main St',
'address_2': f'Suite {randint(1000, 9999)}',
'city': 'Charlotte',
'state': 'NC',
'zip_code': '28202',
'country': 'US',
'email': 'mytest@testinginc.com',
'website': 'http://www.mytestingco.com',
'fy_start_month': choice(cls.FY_STARTS),
'admin': cls.user_model,
'accrual_method': next(cls.accrual_cycle)
}
def get_random_entity_model(self) -> EntityModel:
if self.ENTITY_MODEL_QUERYSET:
return choice(self.ENTITY_MODEL_QUERYSET)
raise ValueError('EntityModels have not been populated.')
    @classmethod
    def create_entity_models(cls, save=True, n: int = 5):
        """Generate *n* random payloads and create one root EntityModel per payload.

        Each row is validated via clean() and persisted only when *save* is True.
        """
        cls.refresh_test_data(n)
        for ent_data in cls.TEST_DATA:
            entity_model = EntityModel.add_root(**ent_data)
            entity_model.admin = cls.user_model
            entity_model.clean()
            if save:
                entity_model.save()
@classmethod
def populate_entity_models(cls):
entities_qs = EntityModel.objects.all()
for entity_model in entities_qs:
|
UserModel = get_user_model()
class DjangoLedgerBaseTest(TestCase):
FY_STARTS = None
CAPITAL_CONTRIBUTION = None
START_DATE = None
DAYS_FORWARD = 9 * 30
TX_QUANTITY = 50
user_model = None
TEST_DATA = list()
CLIENT = None
TZ = None
N = None
USER_EMAIL = None
PASSWORD = None
USERNAME = None
logger = None
accrual_cycle = cycle([True, False])
    @classmethod
    def setUpTestData(cls):
        """Create the class-level fixtures shared by every test: a logger, a
        reusable test user, random entity payloads and populated EntityModel rows."""
        cls.logger = getLogger(__name__)
        cls.logger.setLevel(level=DEBUG)
        # Credentials for the throw-away test user.
        cls.USERNAME: str = 'testuser'
        cls.PASSWORD: str = 'NeverUseThisPassword12345'
        cls.USER_EMAIL: str = 'testuser@djangoledger.com'
        cls.N: int = 2
        # NOTE(review): DAYS_FWD is set here while the class attribute is named
        # DAYS_FORWARD -- confirm which one downstream code actually reads.
        cls.DAYS_FWD: int = randint(180, 180 * 3)
        cls.TZ = get_default_timezone()
        cls.START_DATE = cls.get_random_date()
        cls.CLIENT = Client(enforce_csrf_checks=False)
        # Reuse the user if an earlier test class already created it.
        try:
            cls.user_model = UserModel.objects.get(username=cls.USERNAME)
        except ObjectDoesNotExist:
            cls.user_model = UserModel.objects.create_user(
                username=cls.USERNAME,
                password=cls.PASSWORD,
                email=cls.USER_EMAIL,
            )
        # Fiscal-year start months are stored as strings '1'..'12'.
        cls.FY_STARTS = list(str(i) for i in range(1, 13))
        cls.TEST_DATA = list()
        cls.CAPITAL_CONTRIBUTION = Decimal('50000.00')
        cls.ENTITY_MODEL_QUERYSET: Optional[EntityModelQuerySet] = None
        # Create EntityModel rows, then fill them with generated ledger data.
        cls.create_entity_models(n=cls.N)
        cls.populate_entity_models()
    @classmethod
    def get_random_date(cls) -> date:
        """Return a random date 1990-2019; day is capped at 27 so it is valid for any month."""
        return date(
            year=choice(range(1990, 2020)),
            month=choice(range(1, 13)),
            day=choice(range(1, 28))
        )
    @classmethod
    def login_client(cls):
        """Authenticate the shared Django test client as the fixture user."""
        # cls.logger.info('Logging in client...')
        cls.CLIENT.login(
            username=cls.USERNAME,
            password=cls.PASSWORD
        )
    @classmethod
    def logout_client(cls):
        """Drop the shared test client's session."""
        # cls.logger.info('Logging out client...')
        cls.CLIENT.logout()
    @classmethod
    def refresh_test_data(cls, n: int = None):
        """Rebuild cls.TEST_DATA with n (default cls.N) random entity payloads."""
        N = n if n else cls.N
        cls.TEST_DATA = [cls.get_random_entity_data() for _ in range(N)]
    @classmethod
    def get_random_entity_data(cls) -> dict:
        """Build one randomized keyword payload suitable for EntityModel.add_root()."""
        return {
            'slug': f'a-cool-slug-{randint(10000, 99999)}',
            'name': f'Testing Inc-{randint(100000, 999999)}',
            'address_1': f'{randint(100000, 999999)} Main St',
            'address_2': f'Suite {randint(1000, 9999)}',
            'city': 'Charlotte',
            'state': 'NC',
            'zip_code': '28202',
            'country': 'US',
            'email': 'mytest@testinginc.com',
            'website': 'http://www.mytestingco.com',
            'fy_start_month': choice(cls.FY_STARTS),
            'admin': cls.user_model,
            'accrual_method': next(cls.accrual_cycle)
        }
    def get_random_entity_model(self) -> EntityModel:
        """Pick one EntityModel at random; raise if populate_entity_models() has not run."""
        if self.ENTITY_MODEL_QUERYSET:
            return choice(self.ENTITY_MODEL_QUERYSET)
        raise ValueError('EntityModels have not been populated.')
    @classmethod
    def create_entity_models(cls, save=True, n: int = 5):
        """Generate n random payloads and create one validated root EntityModel per payload."""
        cls.refresh_test_data(n)
        for ent_data in cls.TEST_DATA:
            entity_model = EntityModel.add_root(**ent_data)
            entity_model.admin = cls.user_model
            entity_model.clean()
            if save:
                entity_model.save()
@classmethod
def populate_entity_models(cls):
entities_qs = EntityModel.objects.all()
for entity_model in entities_qs: | data_generator = EntityDataGenerator( | 0 | 2023-10-20 01:07:20+00:00 | 12k |
hitz-zentroa/This-is-not-a-Dataset | run.py | [
{
"identifier": "load_model",
"path": "load_model.py",
"snippet": "def load_model(\n inference: bool,\n model_weights_name_or_path: str,\n quantization: Optional[int] = None,\n use_lora: bool = False,\n lora_weights_name_or_path: Optional[str] = None,\n lora_target_modules: Optional[Li... | from load_model import load_model
from dataset import get_dataloader
from evaluate import evaluate
from config import DataTrainingArguments, ModelArguments
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
get_scheduler,
)
from tqdm import tqdm
from accelerate import Accelerator, find_executable_batch_size
from typing import List
from optimizer import get_optimizer
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.modeling_utils import unwrap_model
import torch
import os
import wandb
import gc
import json
import math
import sys
import logging | 10,589 | **gen_inputs,
).logits
logits = logits[:, -1, :]
logits = torch.nn.functional.softmax(logits, dim=-1)
logits = logits[:, [yes_id, no_id]]
logits = logits[:, 0] / (logits[:, 0] + logits[:, 1])
preds = logits > 0.5
preds = accelerator.gather(preds).cpu().tolist()
logits = accelerator.gather(logits).cpu().tolist()
if accelerator.is_local_main_process:
if accelerator.num_processes > 1:
# Remove duplicated in last batch if we are in a distributed setting
if step == len(dataloader) - 1:
preds = preds[: (len(dataloader.dataset) - samples_seen)]
logits = logits[: (len(dataloader.dataset) - samples_seen)]
else:
samples_seen += len(batch)
all_preds.extend(preds)
all_scores.extend(logits)
else:
preds = model.generate(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
max_new_tokens=6,
)
preds = accelerator.gather(
accelerator.pad_across_processes(
preds,
dim=1,
pad_index=tokenizer.pad_token_id,
)
).cpu()
inputs_ids = accelerator.gather(
accelerator.pad_across_processes(
batch["input_ids"],
dim=1,
pad_index=tokenizer.pad_token_id,
)
).cpu()
preds = preds[:, len(inputs_ids[0]) :]
if accelerator.is_local_main_process:
if accelerator.num_processes > 1:
# Remove duplicated in last batch if we are in a distributed setting
if step == len(dataloader) - 1:
preds = preds[: (len(dataloader.dataset) - samples_seen)]
else:
samples_seen += len(batch)
preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# print(preds)
for pred in preds:
pred = pred.lower()
if "true" in pred:
all_preds.append(True)
else:
all_preds.append(False)
if accelerator.is_local_main_process:
with open(output_path, "w", encoding="utf8") as f:
for pred in all_preds if not return_scores else all_scores:
print(pred, file=f)
if not return_scores:
json_dataset = dataloader.dataset.get_jsonl()
assert len(json_dataset) == len(all_preds)
with open(
os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8"
) as f:
for json_line, pred in zip(json_dataset, all_preds):
json_line["prediction"] = bool(pred)
print(json.dumps(json_line, ensure_ascii=False), file=f)
model.train()
def main(
model_args: ModelArguments,
data_args: DataTrainingArguments,
training_args: Seq2SeqTrainingArguments,
):
assert (
training_args.do_train or training_args.do_predict
), "You must specify do_train or do_predict"
assert not (training_args.do_train and data_args.do_predict_full_dataset), (
"You cannot do both training and predict_full_dataset, "
"as the model will be evaluated on the full dataset, which"
" includes the training set."
)
logging.basicConfig(level=logging.INFO)
accelerator = Accelerator()
print(f"Accelerator State: {accelerator.state}")
set_seed(training_args.seed)
if training_args.do_train:
model, tokenizer = load_model(
inference=False,
model_weights_name_or_path=model_args.model_name_or_path,
lora_weights_name_or_path=model_args.lora_weights_name_or_path,
quantization=model_args.quantization,
use_lora=model_args.use_lora,
lora_target_modules=model_args.lora_target_modules,
torch_dtype=model_args.torch_dtype,
force_auto_device_map=data_args.force_auto_device_map,
use_flash_attention=model_args.use_flash_attention,
use_gradient_checkpointing=model_args.use_lora,
)
true_tokens_ids = tokenizer.encode("True", add_special_tokens=False)
false_tokens_ids = tokenizer.encode("False", add_special_tokens=False)
|
def clean_cache():
    """Release cached GPU memory and run the Python garbage collector.

    Works around repeated-inference memory growth:
    https://github.com/huggingface/transformers/issues/22801

    On CPU-only hosts the CUDA calls are skipped and only gc runs.
    """
    if not torch.cuda.is_available():
        # No GPU present: only Python-level garbage collection applies.
        gc.collect()
        return
    print(f"Cleaning GPU memory. Current memory usage: {torch.cuda.memory_allocated()}")
    # Free cached blocks, collect unreachable tensors, then free again so
    # blocks released by the collector are returned to the CUDA allocator too.
    torch.cuda.empty_cache()
    gc.collect()
    torch.cuda.empty_cache()
    print(f"GPU memory usage after cleaning: {torch.cuda.memory_allocated()}")
def compute_loss(model, inputs, return_outputs=False):
    """
    Compute a weighted cross-entropy loss for `model` on `inputs`.

    `inputs` must contain `labels` and `loss_weight_mask`; both are popped
    before the remaining tensors are forwarded to the model. The mask
    re-weights each token's loss and the result is normalized by the total
    mask weight. Decoder-only (causal LM) models get the usual one-position
    shift so logits at position t are scored against the token at t+1.
    """
    if "labels" in inputs:
        labels = inputs.pop("labels")
    else:
        raise ValueError("You should supply a labels key to compute the loss")
    if "loss_weight_mask" in inputs:
        loss_weight_mask = inputs.pop("loss_weight_mask")
    else:
        raise ValueError("You should supply a loss_weight_mask key to compute the loss")
    # Encoder-decoder models consume labels directly (for decoder inputs).
    if unwrap_model(model).config.is_encoder_decoder:
        outputs = model(labels=labels, **inputs)
    else:
        outputs = model(**inputs)
    logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
    model_name = unwrap_model(model)._get_name()
    # Causal LMs (including PEFT-wrapped ones) predict the *next* token, so
    # align logits[t] with labels[t+1] before flattening.
    if (
        model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values()
        or model_name == "PeftModelForCausalLM"
    ):
        logits = logits[..., :-1, :].contiguous()
        labels = labels[..., 1:].contiguous()
        loss_weight_mask = loss_weight_mask[..., 1:].contiguous()
    logits = logits.view(-1, logits.size(-1))
    labels = labels.view(-1)
    loss_weight_mask = loss_weight_mask.view(-1)
    # Per-token losses, then a weighted mean over the mask (ignore_index -100).
    loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100)
    loss = loss_fct(logits, labels)
    loss = torch.sum(loss * loss_weight_mask) / torch.sum(loss_weight_mask)
    return (loss, outputs) if return_outputs else loss
def gen_predictions(
    model,
    tokenizer,
    true_tokens_ids: List[int],
    false_tokens_ids: List[int],
    dataloader,
    output_path,
    accelerator,
    print_first=False,
    predict_with_generate=False,
    return_scores=False,
):
    """Run boolean (True/False) inference over `dataloader` and write results.

    Two modes:
      * scoring mode (default): take the next-token distribution, compare the
        probabilities of the first "True"/"False" sub-tokens and threshold at
        0.5 (optionally dumping the raw normalized scores via `return_scores`);
      * `predict_with_generate`: decode up to 6 new tokens and look for the
        substring "true" in the lowercased output.

    Only the local main process accumulates/writes predictions; in distributed
    runs the gathered last batch is trimmed back to the dataset size. Output:
    one prediction (or score) per line at `output_path`, plus a `.jsonl` dump
    with a `prediction` field when scores are not requested. The model is
    restored to train mode on exit.
    """
    if predict_with_generate and return_scores:
        raise ValueError(
            "return_scores is not supported when predict_with_generate is True"
        )
    model.eval()
    with torch.no_grad():
        samples_seen: int = 0
        # Only the FIRST sub-token of "True"/"False" is compared.
        yes_id = true_tokens_ids[0]
        no_id = false_tokens_ids[0]
        all_preds = []
        all_scores = []
        first = True
        for step, batch in enumerate(
            tqdm(dataloader, f"Inference on {os.path.basename(output_path)}")
        ):
            if print_first and accelerator.is_local_main_process:
                ### DEBUG ###
                # Dump the decoded model inputs of the first batch once.
                if print_first and first and accelerator.is_main_process:
                    decodeable_inputs = batch.input_ids.clone()
                    decodeable_inputs[
                        decodeable_inputs == -100
                    ] = tokenizer.pad_token_id
                    model_inputs = "\n".join(
                        tokenizer.batch_decode(
                            decodeable_inputs,
                            skip_special_tokens=False,
                            clean_up_tokenization_spaces=False,
                        )
                    )
                    print(f"*** Sample of batch 0 ***")
                    print(f"-- Model inputs --\n{model_inputs}")
                    print(f"*** End of sample ***\n")
                    first = False
            if not predict_with_generate:
                if not model.config.is_encoder_decoder:
                    logits = model(
                        input_ids=batch["input_ids"],
                        attention_mask=batch["attention_mask"],
                    ).logits
                else:
                    # Encoder-decoder: run the encoder once, then score a
                    # single decoder step primed with the pad token.
                    encoder_output = model.get_encoder()(
                        input_ids=batch["input_ids"],
                        attention_mask=batch["attention_mask"],
                    )
                    decoder_args = {
                        "attention_mask": batch["attention_mask"],
                        "use_cache": False,
                        "encoder_outputs": encoder_output,
                    }
                    gen_inputs = model.prepare_inputs_for_generation(
                        input_ids=torch.tensor(
                            [[tokenizer.pad_token_id]] * len(batch["input_ids"])
                        ).to(batch["input_ids"].device),
                        **decoder_args,
                    )
                    logits = model(
                        **gen_inputs,
                    ).logits
                # Normalized P(True) over the {True, False} pair at the last position.
                logits = logits[:, -1, :]
                logits = torch.nn.functional.softmax(logits, dim=-1)
                logits = logits[:, [yes_id, no_id]]
                logits = logits[:, 0] / (logits[:, 0] + logits[:, 1])
                preds = logits > 0.5
                preds = accelerator.gather(preds).cpu().tolist()
                logits = accelerator.gather(logits).cpu().tolist()
                if accelerator.is_local_main_process:
                    if accelerator.num_processes > 1:
                        # Remove duplicated in last batch if we are in a distributed setting
                        if step == len(dataloader) - 1:
                            preds = preds[: (len(dataloader.dataset) - samples_seen)]
                            logits = logits[: (len(dataloader.dataset) - samples_seen)]
                        else:
                            samples_seen += len(batch)
                    all_preds.extend(preds)
                    all_scores.extend(logits)
            else:
                preds = model.generate(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    max_new_tokens=6,
                )
                preds = accelerator.gather(
                    accelerator.pad_across_processes(
                        preds,
                        dim=1,
                        pad_index=tokenizer.pad_token_id,
                    )
                ).cpu()
                inputs_ids = accelerator.gather(
                    accelerator.pad_across_processes(
                        batch["input_ids"],
                        dim=1,
                        pad_index=tokenizer.pad_token_id,
                    )
                ).cpu()
                # Strip the prompt: keep only the newly generated tokens.
                preds = preds[:, len(inputs_ids[0]) :]
                if accelerator.is_local_main_process:
                    if accelerator.num_processes > 1:
                        # Remove duplicated in last batch if we are in a distributed setting
                        if step == len(dataloader) - 1:
                            preds = preds[: (len(dataloader.dataset) - samples_seen)]
                        else:
                            samples_seen += len(batch)
                    preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
                    # print(preds)
                    for pred in preds:
                        pred = pred.lower()
                        if "true" in pred:
                            all_preds.append(True)
                        else:
                            all_preds.append(False)
        if accelerator.is_local_main_process:
            with open(output_path, "w", encoding="utf8") as f:
                for pred in all_preds if not return_scores else all_scores:
                    print(pred, file=f)
            if not return_scores:
                json_dataset = dataloader.dataset.get_jsonl()
                assert len(json_dataset) == len(all_preds)
                with open(
                    os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8"
                ) as f:
                    for json_line, pred in zip(json_dataset, all_preds):
                        json_line["prediction"] = bool(pred)
                        print(json.dumps(json_line, ensure_ascii=False), file=f)
    model.train()
def main(
model_args: ModelArguments,
data_args: DataTrainingArguments,
training_args: Seq2SeqTrainingArguments,
):
assert (
training_args.do_train or training_args.do_predict
), "You must specify do_train or do_predict"
assert not (training_args.do_train and data_args.do_predict_full_dataset), (
"You cannot do both training and predict_full_dataset, "
"as the model will be evaluated on the full dataset, which"
" includes the training set."
)
logging.basicConfig(level=logging.INFO)
accelerator = Accelerator()
print(f"Accelerator State: {accelerator.state}")
set_seed(training_args.seed)
if training_args.do_train:
model, tokenizer = load_model(
inference=False,
model_weights_name_or_path=model_args.model_name_or_path,
lora_weights_name_or_path=model_args.lora_weights_name_or_path,
quantization=model_args.quantization,
use_lora=model_args.use_lora,
lora_target_modules=model_args.lora_target_modules,
torch_dtype=model_args.torch_dtype,
force_auto_device_map=data_args.force_auto_device_map,
use_flash_attention=model_args.use_flash_attention,
use_gradient_checkpointing=model_args.use_lora,
)
true_tokens_ids = tokenizer.encode("True", add_special_tokens=False)
false_tokens_ids = tokenizer.encode("False", add_special_tokens=False)
| train_dataloader = get_dataloader( | 1 | 2023-10-18 10:24:48+00:00 | 12k |
SKYeve/Transcript-Combiner | pull_notes.py | [
{
"identifier": "YoudaoNoteConvert",
"path": "convert.py",
"snippet": "class YoudaoNoteConvert(object):\n \"\"\"\n 有道云笔记 xml或者json 内容转换为 markdown 内容\n \"\"\"\n\n @staticmethod\n def covert_html_to_markdown(file_path) -> str:\n \"\"\"\n 转换 HTML 为 MarkDown\n :param file... | import json
import logging
import os
import re
import sys
import time
import traceback
import xml.etree.ElementTree as ET
import requests
from enum import Enum
from typing import Tuple
from convert import YoudaoNoteConvert
from youDaoNoteApi import YoudaoNoteApi
from pull_images import PullImages
from public import FileActionEnum
from public import covert_config | 8,146 | dir_info = self.youdaonote_api.get_dir_info_by_id(root_dir_id)
for entry in dir_info['entries']:
file_entry = entry['fileEntry']
if file_entry['name'] == ydnote_dir:
return file_entry['id'], ''
return '', '有道云笔记指定顶层目录不存在'
    def _add_or_update_file(self, file_id, file_name, local_dir, modify_time):
        """
        Download one remote file, converting note types to Markdown locally.
        :param file_id: remote file id
        :param file_name: remote file name (with its original suffix)
        :param local_dir: local destination directory
        :param modify_time: remote modification timestamp used for freshness checks
        :return:
        """
        youdao_file_suffix = os.path.splitext(file_name)[1]  # original suffix, e.g. '.note' / '.md'
        note_type = self.judge_type(file_id,youdao_file_suffix)
        # print(f"{file_name}:{note_type}")
        is_note = True if note_type == 1 or note_type == 2 else False
        original_file_path = os.path.join(local_dir, file_name).replace('\\', '/')  # path keeping the original suffix
        # Absolute path of the converted .md file (only note types get renamed).
        local_file_path = os.path.join(local_dir, ''.join([os.path.splitext(file_name)[0], MARKDOWN_SUFFIX])).replace(
            '\\', '/') if is_note else original_file_path
        # Console tip showing the original name and the detected note type.
        tip = f'| 原文件: {file_name} | 类型:{note_type}'
        file_action = self._get_file_action(local_file_path, modify_time)
        if file_action == FileActionEnum.CONTINUE:
            return
        if file_action == FileActionEnum.UPDATE:
            # Overwriting via f.write() fails on Windows (WinError 183), so
            # delete the stale file first.
            os.remove(local_file_path)
        try:
            self._pull_file(file_id, original_file_path, note_type)
            print('{}「{}」{}'.format(file_action.value, local_file_path, tip))
        except Exception as error:
            print('{}「{}」失败!请检查文件!错误提示:{}'.format(file_action.value, original_file_path, format(error)))
def _judge_is_note(self, file_id, youdao_file_suffix):
"""
判断是否是 note 类型
:param file_id:
:param youdao_file_suffix:
:return:
"""
is_note = False
# 1、如果文件是 .note 类型
if youdao_file_suffix == NOTE_SUFFIX:
is_note = True
# 2、如果文件没有类型后缀,但以 `<?xml` 开头
if not youdao_file_suffix:
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_note = True if content == b"<?xml" else False
return is_note
# def judge_type(self, noteType: int, orgEditorType: int) -> int:
# """
# 判断返回内容
# :param entryType: int
# :param orgEditorType: int
# :return: note_type: int
# """
# note_type = 0
# # 返回xml格式的note笔记内容,noteType == 0 and orgEditorType == 1
# if noteType == 0 and orgEditorType == 1:
# note_type = 1
# # 返回json格式的note笔记内容
# elif (noteType == 7 or noteType == 5) and orgEditorType == 1:
# note_type = 2
# # 返回md文件内容
# elif noteType == 0 and orgEditorType == 0:
# note_type = 3
# return note_type
    def judge_type(self,file_id: str ,youdao_file_suffix: str) -> int:
        """
        Classify a remote file by suffix (and, for .note, by content sniffing).
        :param file_id: remote file id (fetched only for '.note' files)
        :param youdao_file_suffix: file extension, e.g. '.note' or '.md'
        :return: note_type: 1 = XML .note, 2 = JSON .note, 3 = markdown, 0 = unknown
        """
        note_type = 0
        is_xml = False
        if youdao_file_suffix == ".note":
            # A .note payload is either legacy XML or newer JSON; sniff bytes.
            response = self.youdaonote_api.get_file_by_id(file_id)
            content = response.content[:5]
            is_xml = True if content == b"<?xml" else False
            if is_xml:  # XML-format note
                note_type = 1
            else:  # JSON-format note
                note_type = 2
        elif youdao_file_suffix == ".md":
            note_type = 3
        else:
            print(f"文件后缀「{youdao_file_suffix}」不识别,请检查!")
        return note_type
def _pull_file(self, file_id, file_path, note_type):
"""
下载文件
:param file_id:
:param file_path:
:param itype:
:return:
"""
# 1、所有的都先下载
response = self.youdaonote_api.get_file_by_id(file_id)
with open(file_path, 'wb') as f:
f.write(response.content) # response.content 本身就是字节类型
new_file_path = ""
# 2、如果文件是 note 类型,将其转换为 MarkDown 类型
if note_type == 1:
try:
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
MARKDOWN_SUFFIX = '.md'
NOTE_SUFFIX = '.note'
CONFIG_PATH = 'config.json'
class YoudaoNotePull(object):
"""
有道云笔记 Pull 封装
"""
CONFIG_PATH = 'config.json'
    def __init__(self):
        """Initialize pull state; real configuration happens in get_ydnote_dir_id()."""
        self.root_local_dir = None  # local root directory for downloaded notes
        self.youdaonote_api = None  # lazily created YoudaoNoteApi client
        self.smms_secret_token = None  # SM.MS image-host token (from config)
        self.is_relative_path = None  # whether image links use relative paths
    def get_ydnote_dir_id(self):
        """
        Resolve the YoudaoNote root (or configured top-level) directory id.

        Loads config.json, validates the local target directory, logs in via
        cookies and stores the SM.MS token / relative-path flag for later use.
        :return: (dir_id, error_msg) -- error_msg is non-empty on failure
        """
        config_dict, error_msg = covert_config(CONFIG_PATH)
        if error_msg:
            return '', error_msg
        local_dir, error_msg = self._check_local_dir(local_dir=config_dict['local_dir'])
        if error_msg:
            return '', error_msg
        self.root_local_dir = local_dir
        self.youdaonote_api = YoudaoNoteApi()
        error_msg = self.youdaonote_api.login_by_cookies()
        if error_msg:
            return '', error_msg
        self.smms_secret_token = config_dict['smms_secret_token']
        self.is_relative_path = config_dict['is_relative_path']
        return self._get_ydnote_dir_id(ydnote_dir=config_dict['ydnote_dir'])
    def pull_dir_by_id_recursively(self, dir_id, local_dir):
        """
        Recursively download every file under the given YoudaoNote directory.
        :param dir_id: remote directory id
        :param local_dir: local destination directory (subfolders created as needed)
        :return: error_msg
        """
        dir_info = self.youdaonote_api.get_dir_info_by_id(dir_id)
        try:
            entries = dir_info['entries']
        except KeyError:
            raise KeyError('有道云笔记修改了接口地址,此脚本暂时不能使用!请提 issue')
        for entry in entries:
            file_entry = entry['fileEntry']
            id = file_entry['id']
            file_name = file_entry['name']
            file_name = self._optimize_file_name(file_name)
            # noteType = file_entry['noteType']
            # orgEditorType = file_entry['orgEditorType']
            if file_entry['dir']:
                # Directory entry: mirror it locally, then recurse into it.
                sub_dir = os.path.join(local_dir, file_name).replace('\\', '/')
                # Create the matching local folder if it does not exist yet.
                if not os.path.exists(sub_dir):
                    os.mkdir(sub_dir)
                self.pull_dir_by_id_recursively(id, sub_dir)
            else:
                modify_time = file_entry['modifyTimeForSort']
                self._add_or_update_file(id, file_name, local_dir, modify_time)
def _check_local_dir(self, local_dir, test_default_dir=None) -> Tuple[str, str]:
"""
检查本地文件夹
:param local_dir: 本地文件夹名(绝对路径)
:return: local_dir, error_msg
"""
# 如果没有指定本地文件夹,当前目录新增 youdaonote 目录
if not local_dir:
add_dir = test_default_dir if test_default_dir else 'youdaonote'
# 兼容 Windows 系统,将路径分隔符(\\)替换为 /
local_dir = os.path.join(os.getcwd(), add_dir).replace('\\', '/')
# 如果指定的本地文件夹不存在,创建文件夹
if not os.path.exists(local_dir):
try:
os.mkdir(local_dir)
except:
return '', '请检查「{}」上层文件夹是否存在,并使用绝对路径!'.format(local_dir)
return local_dir, ''
def _get_ydnote_dir_id(self, ydnote_dir) -> Tuple[str, str]:
"""
获取指定有道云笔记指定目录 ID
:param ydnote_dir: 指定有道云笔记指定目录
:return: dir_id, error_msg
"""
root_dir_info = self.youdaonote_api.get_root_dir_info_id()
root_dir_id = root_dir_info['fileEntry']['id']
# 如果不指定文件夹,取根目录 ID
if not ydnote_dir:
return root_dir_id, ''
dir_info = self.youdaonote_api.get_dir_info_by_id(root_dir_id)
for entry in dir_info['entries']:
file_entry = entry['fileEntry']
if file_entry['name'] == ydnote_dir:
return file_entry['id'], ''
return '', '有道云笔记指定顶层目录不存在'
    def _add_or_update_file(self, file_id, file_name, local_dir, modify_time):
        """
        Download one remote file, converting note types to Markdown locally.
        :param file_id: remote file id
        :param file_name: remote file name (with its original suffix)
        :param local_dir: local destination directory
        :param modify_time: remote modification timestamp used for freshness checks
        :return:
        """
        youdao_file_suffix = os.path.splitext(file_name)[1]  # original suffix, e.g. '.note' / '.md'
        note_type = self.judge_type(file_id,youdao_file_suffix)
        # print(f"{file_name}:{note_type}")
        is_note = True if note_type == 1 or note_type == 2 else False
        original_file_path = os.path.join(local_dir, file_name).replace('\\', '/')  # path keeping the original suffix
        # Absolute path of the converted .md file (only note types get renamed).
        local_file_path = os.path.join(local_dir, ''.join([os.path.splitext(file_name)[0], MARKDOWN_SUFFIX])).replace(
            '\\', '/') if is_note else original_file_path
        # Console tip showing the original name and the detected note type.
        tip = f'| 原文件: {file_name} | 类型:{note_type}'
        file_action = self._get_file_action(local_file_path, modify_time)
        if file_action == FileActionEnum.CONTINUE:
            return
        if file_action == FileActionEnum.UPDATE:
            # Overwriting via f.write() fails on Windows (WinError 183), so
            # delete the stale file first.
            os.remove(local_file_path)
        try:
            self._pull_file(file_id, original_file_path, note_type)
            print('{}「{}」{}'.format(file_action.value, local_file_path, tip))
        except Exception as error:
            print('{}「{}」失败!请检查文件!错误提示:{}'.format(file_action.value, original_file_path, format(error)))
def _judge_is_note(self, file_id, youdao_file_suffix):
"""
判断是否是 note 类型
:param file_id:
:param youdao_file_suffix:
:return:
"""
is_note = False
# 1、如果文件是 .note 类型
if youdao_file_suffix == NOTE_SUFFIX:
is_note = True
# 2、如果文件没有类型后缀,但以 `<?xml` 开头
if not youdao_file_suffix:
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_note = True if content == b"<?xml" else False
return is_note
# def judge_type(self, noteType: int, orgEditorType: int) -> int:
# """
# 判断返回内容
# :param entryType: int
# :param orgEditorType: int
# :return: note_type: int
# """
# note_type = 0
# # 返回xml格式的note笔记内容,noteType == 0 and orgEditorType == 1
# if noteType == 0 and orgEditorType == 1:
# note_type = 1
# # 返回json格式的note笔记内容
# elif (noteType == 7 or noteType == 5) and orgEditorType == 1:
# note_type = 2
# # 返回md文件内容
# elif noteType == 0 and orgEditorType == 0:
# note_type = 3
# return note_type
def judge_type(self,file_id: str ,youdao_file_suffix: str) -> int:
"""
判断返回内容
:param entryType: int
:param orgEditorType: int
:return: note_type: int
"""
note_type = 0
is_xml = False
if youdao_file_suffix == ".note":
response = self.youdaonote_api.get_file_by_id(file_id)
content = response.content[:5]
is_xml = True if content == b"<?xml" else False
if is_xml: # xml类型
note_type = 1
else: # json类型
note_type = 2
elif youdao_file_suffix == ".md":
note_type = 3
else:
print(f"文件后缀「{youdao_file_suffix}」不识别,请检查!")
return note_type
def _pull_file(self, file_id, file_path, note_type):
"""
下载文件
:param file_id:
:param file_path:
:param itype:
:return:
"""
# 1、所有的都先下载
response = self.youdaonote_api.get_file_by_id(file_id)
with open(file_path, 'wb') as f:
f.write(response.content) # response.content 本身就是字节类型
new_file_path = ""
# 2、如果文件是 note 类型,将其转换为 MarkDown 类型
if note_type == 1:
try: | new_file_path = YoudaoNoteConvert.covert_xml_to_markdown(file_path) | 0 | 2023-10-17 11:21:50+00:00 | 12k |
S-LoRA/S-LoRA | slora/models/llama/layer_infer/transformer_layer_infer.py | [
{
"identifier": "LlamaTransformerLayerWeight",
"path": "slora/models/llama/layer_weights/transformer_layer_weight.py",
"snippet": "class LlamaTransformerLayerWeight(TransformerLayerWeight):\n def __init__(self, layer_num, tp_rank, world_size, data_type, network_config, mode=[]):\n super().__in... | import torch
import torch.functional as F
import torch.distributed as dist
import numpy as np
import triton
from typing import Tuple
from slora.models.llama.layer_weights.transformer_layer_weight import LlamaTransformerLayerWeight
from slora.models.llama.triton_kernel.context_flashattention_nopad import context_attention_fwd
from slora.models.llama.triton_kernel.token_attention_nopad_att1 import token_att_fwd, token_att_fwd_int8k
from slora.models.llama.triton_kernel.token_attention_nopad_softmax import token_softmax_fwd
from slora.models.llama.triton_kernel.token_attention_nopad_reduceV import token_att_fwd2, token_att_fwd2_int8v
from slora.models.llama.triton_kernel.rmsnorm import rmsnorm_forward
from slora.models.llama.triton_kernel.rotary_emb import rotary_emb_fwd
from slora.models.llama.infer_struct import LlamaInferStateInfo
from slora.common.basemodel.triton_kernel.destindex_copy_kv import destindex_copy_kv, destindex_copy_quantize_kv
from slora.common.basemodel import TransformerLayerInferTpl
from slora.models.llama.triton_kernel.token_attention_softmax_and_reducev import token_softmax_reducev_fwd | 8,352 |
class LlamaTransformerLayerInfer(TransformerLayerInferTpl):
"""
"""
def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):
super().__init__(layer_num, tp_rank, world_size, network_config, mode)
self.eps_ = network_config["rms_norm_eps"]
self.tp_q_head_num_ = network_config["num_attention_heads"] // self.world_size_
self.tp_k_head_num_ = self.tp_q_head_num_
self.tp_v_head_num_ = self.tp_q_head_num_
self.tp_o_head_num_ = self.tp_q_head_num_
self.head_dim_ = network_config["hidden_size"] // network_config["num_attention_heads"]
self.embed_dim_ = network_config["hidden_size"]
return
    def _att_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
        # Pre-attention RMSNorm (Triton kernel) with this layer's attention norm weight.
        return rmsnorm_forward(input, weight=layer_weight.att_norm_weight_, eps=self.eps_)
    def _ffn_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
        # Pre-FFN RMSNorm (Triton kernel) with this layer's FFN norm weight.
        return rmsnorm_forward(input, weight=layer_weight.ffn_norm_weight_, eps=self.eps_)
    def _get_qkv(self, input, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
        """Project hidden states to Q/K/V and apply rotary embeddings to Q and K.

        K and V are written in place into cache_k / cache_v (via `out=`);
        only Q is returned.
        """
        q = torch.mm(input.view(-1, self.embed_dim_), layer_weight.q_weight_)
        # Rotary position embedding is applied to Q and K only, not V.
        rotary_emb_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.position_cos, infer_state.position_sin)
        torch.mm(input.view(-1, self.embed_dim_), layer_weight.k_weight_,
                 out=cache_k.view(-1, self.tp_k_head_num_ * self.head_dim_))
        rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin)
        torch.mm(input.view(-1, self.embed_dim_), layer_weight.v_weight_,
                 out=cache_v.view(-1, self.tp_v_head_num_ * self.head_dim_))
        return q
def _post_cache_kv(self, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight):
mem_manager = infer_state.mem_manager
if infer_state.is_prefill:
self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.prefill_mem_index, mem_manager)
return
else:
if not infer_state.decode_is_contiguous:
self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.decode_mem_index, mem_manager)
return
return
    def _context_attention_kernel(self, q, k, v, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:
        """Prefill attention over full prompts via the Triton flash-attention kernel."""
        o_tensor = torch.empty_like(q)
        context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_),
                              k.view(-1, self.tp_k_head_num_, self.head_dim_),
                              v.view(-1, self.tp_v_head_num_, self.head_dim_),
                              o_tensor.view(-1, self.tp_q_head_num_, self.head_dim_),
                              infer_state.b_start_loc,
                              infer_state.b_seq_len,
                              infer_state.max_len_in_batch)
        return o_tensor
    def _token_attention_kernel(self, q, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:
        """Single-token (decode-step) attention dispatch.

        NOTE(review): `_token_decode_attention_mode` is not defined in this
        excerpt -- presumably declared further down the class; verify.
        """
        return self._token_decode_attention_mode(q, infer_state)
def _get_o(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
o_tensor = torch.mm(input.view(-1, self.tp_o_head_num_ * self.head_dim_), layer_weight.o_weight_)
return o_tensor
def _ffn(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
    """SwiGLU feed-forward: down_proj(silu(gate_proj(x)) * up_proj(x))."""
    x2d = input.view(-1, self.embed_dim_)
    gate = torch.mm(x2d, layer_weight.gate_proj)
    torch.nn.functional.silu(gate, inplace=True)
    up = torch.mm(x2d, layer_weight.up_proj)
    # Drop references eagerly to keep peak activation memory low.
    del input, x2d
    fused = gate * up
    del gate, up
    out = torch.mm(fused, layer_weight.down_proj)
    del fused
    return out
def _copy_kv_to_mem_cache(self, key_buffer, value_buffer, mem_index, mem_manager):
    """Scatter this step's K/V rows into the layer's cache at ``mem_index``.

    When ``"int8kv"`` is enabled, rows go through the quantizing copy kernel
    and the scales land in the matching ``*_scale_buffer``.
    """
    layer = self.layer_num_
    if "int8kv" in self.mode:
        for src, dst, scale in (
            (key_buffer, mem_manager.key_buffer, mem_manager.key_scale_buffer),
            (value_buffer, mem_manager.value_buffer, mem_manager.value_scale_buffer),
        ):
            destindex_copy_quantize_kv(src, mem_index, dst[layer], scale[layer])
    else:
        destindex_copy_kv(key_buffer, mem_index, mem_manager.key_buffer[layer])
        destindex_copy_kv(value_buffer, mem_index, mem_manager.value_buffer[layer])
def _token_decode_attention_normal(self, q, infer_state: LlamaInferStateInfo):
total_token_num = infer_state.total_token_num
batch_size = infer_state.batch_size
calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_)
att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device="cuda")
token_att_fwd(q.view(calcu_shape1),
infer_state.mem_manager.key_buffer[self.layer_num_],
att_m_tensor,
infer_state.b_loc,
infer_state.b_start_loc,
infer_state.b_seq_len,
infer_state.max_len_in_batch)
if triton.__version__ == "2.0.0":
prob = torch.empty_like(att_m_tensor)
|
class LlamaTransformerLayerInfer(TransformerLayerInferTpl):
"""
"""
def __init__(self, layer_num, tp_rank, world_size, network_config, mode=None):
    """Derive per-tensor-parallel-rank head counts and dims from the config.

    Args:
        layer_num: index of this transformer layer.
        tp_rank: this process's tensor-parallel rank.
        world_size: tensor-parallel world size.
        network_config: model config dict (needs "rms_norm_eps",
            "num_attention_heads", "hidden_size").
        mode: optional list of feature flags (e.g. "int8kv"); defaults to
            no flags.
    """
    # Fix: the original used the mutable default `mode=[]`, which is shared
    # across every instantiation; normalize None -> fresh list instead.
    super().__init__(layer_num, tp_rank, world_size, network_config, [] if mode is None else mode)
    self.eps_ = network_config["rms_norm_eps"]
    # self.world_size_ is expected to be set by the base class — confirm.
    self.tp_q_head_num_ = network_config["num_attention_heads"] // self.world_size_
    self.tp_k_head_num_ = self.tp_q_head_num_
    self.tp_v_head_num_ = self.tp_q_head_num_
    self.tp_o_head_num_ = self.tp_q_head_num_
    self.head_dim_ = network_config["hidden_size"] // network_config["num_attention_heads"]
    self.embed_dim_ = network_config["hidden_size"]
    return
def _att_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
    """RMS-normalize hidden states with the attention-block norm weight."""
    normed = rmsnorm_forward(input, weight=layer_weight.att_norm_weight_, eps=self.eps_)
    return normed
def _ffn_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
    """RMS-normalize hidden states with the feed-forward-block norm weight."""
    normed = rmsnorm_forward(input, weight=layer_weight.ffn_norm_weight_, eps=self.eps_)
    return normed
def _get_qkv(self, input, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
    """Compute Q/K/V projections; K and V land directly in the cache buffers.

    Only Q is returned. Rotary embeddings are applied to Q and K via
    ``rotary_emb_fwd`` (return value unused, so presumably in place —
    confirm against the kernel); values are left unrotated.
    """
    q = torch.mm(input.view(-1, self.embed_dim_), layer_weight.q_weight_)
    rotary_emb_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.position_cos, infer_state.position_sin)
    # Write the K projection into cache_k without an intermediate tensor.
    torch.mm(input.view(-1, self.embed_dim_), layer_weight.k_weight_,
             out=cache_k.view(-1, self.tp_k_head_num_ * self.head_dim_))
    rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin)
    # Write the V projection into cache_v; no rotary for V.
    torch.mm(input.view(-1, self.embed_dim_), layer_weight.v_weight_,
             out=cache_v.view(-1, self.tp_v_head_num_ * self.head_dim_))
    return q
def _post_cache_kv(self, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight):
    """Persist K/V into the shared cache when needed.

    Prefill always copies; decode copies only when the decode destination
    slots are not contiguous (i.e. K/V were not produced in place).
    """
    if infer_state.is_prefill:
        self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.prefill_mem_index, infer_state.mem_manager)
        return
    if not infer_state.decode_is_contiguous:
        self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.decode_mem_index, infer_state.mem_manager)
    return
def _context_attention_kernel(self, q, k, v, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:
    """Run fused full-context (prefill) attention over the batch.

    ``b_start_loc`` / ``b_seq_len`` describe where each request's tokens
    live in the flat token dimension. Output is written into a new tensor
    shaped like ``q``. ``layer_weight`` is unused here.
    """
    o_tensor = torch.empty_like(q)
    # The kernel expects (tokens, heads, head_dim) views of the flat tensors.
    context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_),
                          k.view(-1, self.tp_k_head_num_, self.head_dim_),
                          v.view(-1, self.tp_v_head_num_, self.head_dim_),
                          o_tensor.view(-1, self.tp_q_head_num_, self.head_dim_),
                          infer_state.b_start_loc,
                          infer_state.b_seq_len,
                          infer_state.max_len_in_batch)
    return o_tensor
def _token_attention_kernel(self, q, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:
    """Single-token (decode) attention; ``layer_weight`` is unused here."""
    result = self._token_decode_attention_mode(q, infer_state)
    return result
def _get_o(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
    """Apply the output projection to the merged attention heads."""
    merged_width = self.tp_o_head_num_ * self.head_dim_
    return torch.mm(input.view(-1, merged_width), layer_weight.o_weight_)
def _ffn(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:
    """Llama MLP: (silu(x @ gate) * (x @ up)) @ down, freeing temporaries early."""
    hidden = input.view(-1, self.embed_dim_)
    activated = torch.nn.functional.silu(torch.mm(hidden, layer_weight.gate_proj), inplace=True)
    product = activated * torch.mm(hidden, layer_weight.up_proj)
    # Release intermediate references before the last matmul to lower peak memory.
    input = None
    activated = None
    result = torch.mm(product, layer_weight.down_proj)
    product = None
    return result
def _copy_kv_to_mem_cache(self, key_buffer, value_buffer, mem_index, mem_manager):
    """Copy K/V rows for this layer into the global cache at ``mem_index``.

    With "int8kv" in ``self.mode``, the quantizing kernel is used and the
    quantization scales are stored in the matching ``*_scale_buffer``.
    """
    if "int8kv" in self.mode:
        destindex_copy_quantize_kv(key_buffer,
                                   mem_index,
                                   mem_manager.key_buffer[self.layer_num_],
                                   mem_manager.key_scale_buffer[self.layer_num_])
        destindex_copy_quantize_kv(value_buffer,
                                   mem_index,
                                   mem_manager.value_buffer[self.layer_num_],
                                   mem_manager.value_scale_buffer[self.layer_num_])
    else:
        destindex_copy_kv(key_buffer, mem_index, mem_manager.key_buffer[self.layer_num_])
        destindex_copy_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_])
def _token_decode_attention_normal(self, q, infer_state: LlamaInferStateInfo):
total_token_num = infer_state.total_token_num
batch_size = infer_state.batch_size
calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_)
att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device="cuda")
token_att_fwd(q.view(calcu_shape1),
infer_state.mem_manager.key_buffer[self.layer_num_],
att_m_tensor,
infer_state.b_loc,
infer_state.b_start_loc,
infer_state.b_seq_len,
infer_state.max_len_in_batch)
if triton.__version__ == "2.0.0":
prob = torch.empty_like(att_m_tensor) | token_softmax_fwd(att_m_tensor, infer_state.b_start_loc, infer_state.b_seq_len, prob, infer_state.max_len_in_batch) | 4 | 2023-11-05 04:08:36+00:00 | 12k |
Yuliang-Liu/Monkey | finetune_multitask.py | [
{
"identifier": "MonkeyLMHeadModel",
"path": "monkey_model/modeling_monkey.py",
"snippet": "class MonkeyLMHeadModel(QWenLMHeadModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.rotary_emb\\.inv_freq\"]\n _keys_to_ignore_on_load_unexpected = [r\"h\\.\\d+\\.attn\\.masked_bias\"]\n\n ... | from dataclasses import dataclass, field
from typing import Dict, Optional, List
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
import json
import math
import logging
import os
import torch
import transformers
import numpy as np
import random | 7,345 | def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
    """Return the i-th pre-tokenized example (ids, labels, attention mask)."""
    return dict(
        input_ids=self.input_ids[i],
        labels=self.labels[i],
        attention_mask=self.attention_mask[i],
    )
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Tokenization is deferred to ``__getitem__`` and memoized per index, so
    construction is cheap and only accessed examples are processed. Note
    the cache is unbounded: every accessed example stays in memory.
    """

    def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int):
        super(LazySupervisedDataset, self).__init__()
        # Fix: the original assigned self.tokenizer twice; keep one assignment.
        self.tokenizer = tokenizer
        self.max_len = max_len
        rank0_print("Formatting inputs...Skip in lazy mode")
        self.raw_data = raw_data
        # index -> preprocessed example, filled lazily on first access
        self.cached_data_dict = {}

    def __len__(self):
        return len(self.raw_data)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        if i in self.cached_data_dict:
            return self.cached_data_dict[i]
        ret = preprocess(self.raw_data[i]["conversations"], self.tokenizer, self.max_len)
        ret = dict(
            input_ids=ret["input_ids"],
            labels=ret["labels"],
            attention_mask=ret["attention_mask"],
        )
        self.cached_data_dict[i] = ret
        return ret
def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args, max_len,
) -> Dict:
    """Build train (and optional eval) datasets for supervised fine-tuning.

    Picks the lazy or eager dataset class based on
    ``data_args.lazy_preprocess`` and loads the JSON conversation files.
    """
    dataset_cls = (
        LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
    )
    rank0_print("Loading data...")
    # Fix: use context managers so the JSON file descriptors are closed
    # deterministically (json.load(open(...)) leaked them until GC).
    with open(data_args.data_path, "r") as f:
        train_json = json.load(f)
    train_dataset = dataset_cls(train_json, tokenizer=tokenizer, max_len=max_len)
    if data_args.eval_data_path:
        with open(data_args.eval_data_path, "r") as f:
            eval_json = json.load(f)
        eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer, max_len=max_len)
    else:
        eval_dataset = None
    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def print_trainable_params(model: torch.nn.Module):
    """Log (on rank 0) the trainable / total parameter counts and ratio."""
    trainable_params, all_param = 0, 0
    for param in model.parameters():
        num_params = param.numel()
        all_param += num_params
        if param.requires_grad:
            trainable_params += num_params
    # Guard against a parameterless model to avoid ZeroDivisionError.
    ratio = 100 * trainable_params / all_param if all_param else 0.0
    rank0_print("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(
        trainable_params, all_param, ratio))
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments, LoraArguments)
)
(
model_args,
data_args,
training_args,
lora_args,
) = parser.parse_args_into_dataclasses()
if getattr(training_args, 'deepspeed', None) and getattr(lora_args, 'q_lora', False):
training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED
compute_dtype = (
torch.float16
if training_args.fp16
else (torch.bfloat16 if training_args.bf16 else torch.float32)
)
local_rank = training_args.local_rank
device_map = None
world_size = int(os.environ.get("WORLD_SIZE", 1))
ddp = world_size != 1
if lora_args.q_lora:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
logging.warning(
"FSDP or ZeRO3 are not incompatible with QLoRA."
)
# Set RoPE scaling factor
config = MonkeyConfig.from_pretrained(
"monkey_model",
cache_dir=training_args.cache_dir,
trust_remote_code=True,
)
rank0_print(config)
config.use_cache = False
# Load model and tokenizer
rank0_print("loading base model")
| # This code is based on the revised code from fastchat based on tatsu-lab/stanford_alpaca.
IGNORE_TOKEN_ID = LabelSmoother.ignore_index
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="")
@dataclass
class DataArguments:
data_path: str = field(
default=None, metadata={"help": "Path to the training data."}
)
eval_data_path: str = field(
default=None, metadata={"help": "Path to the evaluation data."}
)
lazy_preprocess: bool = False
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=8192,
metadata={
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
use_lora: bool = False
fix_vit: bool = True
@dataclass
class LoraArguments:
lora_r: int = 16
lora_alpha: int = 32
lora_dropout: float = 0.05
lora_target_modules: List[str] = field(
default_factory=lambda: ["in_proj","out_proj","c_fc"] ##["in_proj","out_proj","c_fc"]
)
lora_weight_path: str = ""
lora_bias: str = "none"
q_lora: bool = False
def maybe_zero_3(param):
if hasattr(param, "ds_id"):
assert param.ds_status == ZeroParamStatus.NOT_AVAILABLE
with zero.GatheredParameters([param]):
param = param.data.detach().cpu().clone()
else:
param = param.detach().cpu().clone()
return param
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
if bias == "none":
to_return = {k: t for k, t in named_params if "lora_" in k}
elif bias == "all":
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
maybe_lora_bias = {}
lora_bias_names = set()
for k, t in named_params:
if "lora_" in k:
to_return[k] = t
bias_name = k.split("lora_")[0] + "bias"
lora_bias_names.add(bias_name)
elif "bias" in k:
maybe_lora_bias[k] = t
for k, t in maybe_lora_bias:
if bias_name in lora_bias_names:
to_return[bias_name] = t
else:
raise NotImplementedError
to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
return to_return
local_rank = None
def rank0_print(*args):
if local_rank == 0:
print(*args)
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str, bias="none"):
"""Collects the state dict and dump to disk."""
# check if zero3 mode enabled
if deepspeed.is_deepspeed_zero3_enabled():
state_dict = trainer.model_wrapped._zero3_consolidated_16bit_state_dict()
else:
state_dict = trainer.model.state_dict()
if trainer.args.should_save and trainer.args.local_rank == 0:
trainer._save(output_dir, state_dict=state_dict)
def format_tokenizer(tokenizer, message, return_target=False, label=False):
_input_ids = tokenizer(message).input_ids
input_ids = _input_ids
if return_target:
if label:
target = input_ids
else:
target = [IGNORE_TOKEN_ID] * (len(_input_ids))
return input_ids, target
else:
return input_ids
def preprocess(
source,
tokenizer,
max_len,
system_message: str = "You are a helpful assistant.",
padding=True
):
# Apply prompt templates
input_ids, targets = [], []
user, assistant = source[0], source[1]
user_input = user['value']
assistant_input = assistant['value']
message_l = [user_input, assistant_input]
for i, message in enumerate(message_l):
try:
_input_ids, _target = format_tokenizer(tokenizer, message, return_target=True, label=True if i == len(message_l) - 1 else False) # <img> 有些text会有img标签,所以使用<img>作为特殊id有问题,标签数量不对等会报错
except Exception as e:
print(e)
continue
input_ids += _input_ids
targets += _target
assert len(_input_ids) == len(_input_ids)
if padding:
input_ids += [-1]+[tokenizer.pad_token_id] * (max_len - len(input_ids)-1)
targets += [tokenizer.pad_token_id] +[IGNORE_TOKEN_ID] * (max_len - len(targets)-1)
targets = targets[:max_len]
input_ids = input_ids[:max_len]
input_ids = torch.tensor(input_ids, dtype=torch.int)
targets = torch.tensor(targets, dtype=torch.int)
attention_mask=input_ids.ne(tokenizer.pad_token_id)
input_ids[input_ids == -1 ] = tokenizer.pad_token_id
return dict(
input_ids=input_ids,
labels=targets,
attention_mask=attention_mask,
)
class SupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int):
super(SupervisedDataset, self).__init__()
rank0_print("Formatting inputs...")
sources = [example["conversations"] for example in raw_data]
data_dict = preprocess(sources, tokenizer, max_len)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
self.attention_mask = data_dict["attention_mask"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
return dict(
input_ids=self.input_ids[i],
labels=self.labels[i],
attention_mask=self.attention_mask[i],
)
class LazySupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int):
super(LazySupervisedDataset, self).__init__()
self.tokenizer = tokenizer
self.max_len = max_len
rank0_print("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.raw_data = raw_data
self.cached_data_dict = {}
def __len__(self):
return len(self.raw_data)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
if i in self.cached_data_dict:
return self.cached_data_dict[i]
ret = preprocess(self.raw_data[i]["conversations"], self.tokenizer, self.max_len)
ret = dict(
input_ids=ret["input_ids"],
labels=ret["labels"],
attention_mask=ret["attention_mask"],
)
self.cached_data_dict[i] = ret
return ret
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer, data_args, max_len,
) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (
LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
)
rank0_print("Loading data...")
train_json = json.load(open(data_args.data_path, "r"))
train_dataset = dataset_cls(train_json, tokenizer=tokenizer, max_len=max_len)
if data_args.eval_data_path:
eval_json = json.load(open(data_args.eval_data_path, "r"))
eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer, max_len=max_len)
else:
eval_dataset = None
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def print_trainable_params(model: torch.nn.Module):
trainable_params, all_param = 0, 0
for param in model.parameters():
num_params = param.numel()
all_param += num_params
if param.requires_grad:
trainable_params += num_params
rank0_print("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(
trainable_params, all_param, 100 * trainable_params / all_param))
# for name,p in model.named_parameters():
# if p.requires_grad and "transformer.h" not in name:
# print(name)
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments, LoraArguments)
)
(
model_args,
data_args,
training_args,
lora_args,
) = parser.parse_args_into_dataclasses()
if getattr(training_args, 'deepspeed', None) and getattr(lora_args, 'q_lora', False):
training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED
compute_dtype = (
torch.float16
if training_args.fp16
else (torch.bfloat16 if training_args.bf16 else torch.float32)
)
local_rank = training_args.local_rank
device_map = None
world_size = int(os.environ.get("WORLD_SIZE", 1))
ddp = world_size != 1
if lora_args.q_lora:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
logging.warning(
"FSDP or ZeRO3 are not incompatible with QLoRA."
)
# Set RoPE scaling factor
config = MonkeyConfig.from_pretrained(
"monkey_model",
cache_dir=training_args.cache_dir,
trust_remote_code=True,
)
rank0_print(config)
config.use_cache = False
# Load model and tokenizer
rank0_print("loading base model") | model = MonkeyLMHeadModel.from_pretrained( | 0 | 2023-11-09 14:31:48+00:00 | 12k |
OpenBMB/ProAgent | ProAgent/n8n_parser/compiler.py | [
{
"identifier": "ENVIRONMENT",
"path": "ProAgent/router/utils.py",
"snippet": "class ENVIRONMENT(Enum):\n '''\n 决定了 record cache 的访问形式\n - Development:不访问缓存,从头开始\n - Refine:访问缓存,但 user messages 必须一致,若不一致(例如节点返回值变化)则停止访问缓存\n - Production:无条件访问缓存,将 record 重播一遍\n '''\n # how to handle ... | import omegaconf
import json
from typing import List, Dict
from copy import deepcopy
from termcolor import colored
from ProAgent.router.utils import ENVIRONMENT
from ProAgent.utils import NodeType, ToolCallStatus, Action, WorkflowType, TestResult, RunTimeStatus, TestDataType
from ProAgent.n8n_parser.node import n8nPythonNode, n8nNodeMeta
from ProAgent.n8n_parser.workflow import n8nPythonWorkflow
from ProAgent.n8n_parser.param_parser import parse_properties
from ProAgent.n8n_tester.run_code import n8nPythonCodeRunner
from ProAgent.n8n_parser.intrinsic_functions import mainWorkflow_code
from ProAgent.loggers.logs import print_action_base, print_action_tool
from ProAgent.running_recorder import RunningRecoder
from ProAgent.config import CONFIG | 8,583 |
class Compiler():
    """Interacts with nodes.json and stores all of the currently loaded
    data structures (nodes, workflows, and the main workflow)."""
    def __init__(self, cfg: omegaconf.DictConfig, recorder: RunningRecoder):
        """
        Initializes the class with the given configuration and recorder.

        Parameters:
            cfg (omegaconf.DictConfig): The configuration object.
            recorder (RunningRecoder): The recorder object.

        Returns:
            None
        """
        self.cfg = cfg
        self.recorder = recorder
        self.nodes: List[n8nPythonNode] = []
        self.trigger_id = 0
        self.action_id = 0
        self.workflows: Dict[n8nPythonWorkflow] = {}
        # The top-level workflow starts from the built-in mainWorkflow template.
        self.mainWorkflow: n8nPythonWorkflow = n8nPythonWorkflow(
            implement_code = mainWorkflow_code
        )
        self.resolve()
        # Load the current program state into the runner so it can be executed.
        self.code_runner = n8nPythonCodeRunner()
        self.code_runner.flash(
            main_workflow = self.mainWorkflow,
            workflows=self.workflows,
            nodes = self.nodes
        )
        self.update_runtime()
    def resolve_integration(self, integration_json):
        """
        Flatten one n8n integration description into a nested mapping
        ``{resource_name: {operation_name: n8nNodeMeta}}``.

        Integrations that declare no "resource" (or no "operation")
        property are filed under a "default" resource/operation.

        Args:
            integration_json (dict): One integration entry (as read from
                nodes.json) with "name" and "properties" keys.

        Returns:
            dict: The resolved integration data.

        Raises:
            AssertionError: If an operation's displayOptions reference a
                resource the integration did not declare, or an
                operation-less integration also declared resources.
        """
        integration_name = integration_json["name"].split(".")[-1]
        integration_data = {}
        no_resource = True
        no_operation = True
        # Pass 1: one bucket per declared resource (first "resource"
        # property wins — note the break).
        for property in integration_json["properties"]:
            if property["name"] == "resource":
                for resource in property["options"]:
                    integration_data[resource["value"]] = {}
                no_resource = False
                break
        if no_resource:
            integration_data["default"] = {}
        # Pass 2: attach every declared operation to its resource bucket.
        for property in integration_json["properties"]:
            if property["name"] == "operation":
                target_resource_name = "default"
                if "displayOptions" in property.keys():
                    assert "show" in property["displayOptions"].keys() and "resource" in property["displayOptions"]["show"].keys()
                    assert len(property["displayOptions"]["show"]["resource"]) == 1
                    target_resource_name = property["displayOptions"]["show"]["resource"][0]
                assert target_resource_name in integration_data.keys(), f"{target_resource_name} in {integration_data.keys()}"
                target_resource = integration_data[target_resource_name]
                for operation in property["options"]:
                    operation_name = operation["value"]
                    operation_description = ""
                    if "description" in operation.keys():
                        operation_description = operation["description"]
                    # Trigger-style integrations (triggers/webhooks) are
                    # identified by their name.
                    node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action
                    target_resource[operation_name] = n8nNodeMeta(
                        node_type=node_type,
                        integration_name=integration_name,
                        resource_name=target_resource_name,
                        operation_name=operation_name,
                        operation_description=operation_description
                    )
                    no_operation = False
        if no_operation:
            # An integration without operations must also be resource-less;
            # expose it as a single default/default entry.
            assert no_resource
            node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action
            integration_data["default"]["default"] = n8nNodeMeta(
                node_type=node_type,
                integration_name=integration_name,
                resource_name="default",
                operation_name="default",
                operation_description=""
            )
        return integration_data
def print_flatten_tools(self):
"""
Generates a function comment for the given function body in a markdown code block with the correct language syntax.
Returns:
str: The function comment in markdown format.
"""
output_description_list = []
for k1, integration_name in enumerate(list(self.flattened_tools.keys())):
operation_counter = 1
data = self.flattened_tools[integration_name]["data"]
des = self.flattened_tools[integration_name]["meta"]["description"]
|
class Compiler():
    """Interacts with nodes.json and stores all of the currently loaded
    data structures (nodes, workflows, and the main workflow)."""
    def __init__(self, cfg: omegaconf.DictConfig, recorder: RunningRecoder):
        """
        Initializes the class with the given configuration and recorder.

        Parameters:
            cfg (omegaconf.DictConfig): The configuration object.
            recorder (RunningRecoder): The recorder object.

        Returns:
            None
        """
        self.cfg = cfg
        self.recorder = recorder
        self.nodes: List[n8nPythonNode] = []
        self.trigger_id = 0
        self.action_id = 0
        self.workflows: Dict[n8nPythonWorkflow] = {}
        # The top-level workflow starts from the built-in mainWorkflow template.
        self.mainWorkflow: n8nPythonWorkflow = n8nPythonWorkflow(
            implement_code = mainWorkflow_code
        )
        self.resolve()
        # Load the current program state into the runner so it can be executed.
        self.code_runner = n8nPythonCodeRunner()
        self.code_runner.flash(
            main_workflow = self.mainWorkflow,
            workflows=self.workflows,
            nodes = self.nodes
        )
        self.update_runtime()
    def resolve_integration(self, integration_json):
        """
        Flatten one n8n integration description into a nested mapping
        ``{resource_name: {operation_name: n8nNodeMeta}}``.

        Integrations that declare no "resource" (or no "operation")
        property are filed under a "default" resource/operation.

        Args:
            integration_json (dict): One integration entry (as read from
                nodes.json) with "name" and "properties" keys.

        Returns:
            dict: The resolved integration data.

        Raises:
            AssertionError: If an operation's displayOptions reference a
                resource the integration did not declare, or an
                operation-less integration also declared resources.
        """
        integration_name = integration_json["name"].split(".")[-1]
        integration_data = {}
        no_resource = True
        no_operation = True
        # Pass 1: one bucket per declared resource (first "resource"
        # property wins — note the break).
        for property in integration_json["properties"]:
            if property["name"] == "resource":
                for resource in property["options"]:
                    integration_data[resource["value"]] = {}
                no_resource = False
                break
        if no_resource:
            integration_data["default"] = {}
        # Pass 2: attach every declared operation to its resource bucket.
        for property in integration_json["properties"]:
            if property["name"] == "operation":
                target_resource_name = "default"
                if "displayOptions" in property.keys():
                    assert "show" in property["displayOptions"].keys() and "resource" in property["displayOptions"]["show"].keys()
                    assert len(property["displayOptions"]["show"]["resource"]) == 1
                    target_resource_name = property["displayOptions"]["show"]["resource"][0]
                assert target_resource_name in integration_data.keys(), f"{target_resource_name} in {integration_data.keys()}"
                target_resource = integration_data[target_resource_name]
                for operation in property["options"]:
                    operation_name = operation["value"]
                    operation_description = ""
                    if "description" in operation.keys():
                        operation_description = operation["description"]
                    # Trigger-style integrations (triggers/webhooks) are
                    # identified by their name.
                    node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action
                    target_resource[operation_name] = n8nNodeMeta(
                        node_type=node_type,
                        integration_name=integration_name,
                        resource_name=target_resource_name,
                        operation_name=operation_name,
                        operation_description=operation_description
                    )
                    no_operation = False
        if no_operation:
            # An integration without operations must also be resource-less;
            # expose it as a single default/default entry.
            assert no_resource
            node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action
            integration_data["default"]["default"] = n8nNodeMeta(
                node_type=node_type,
                integration_name=integration_name,
                resource_name="default",
                operation_name="default",
                operation_description=""
            )
        return integration_data
def print_flatten_tools(self):
"""
Generates a function comment for the given function body in a markdown code block with the correct language syntax.
Returns:
str: The function comment in markdown format.
"""
output_description_list = []
for k1, integration_name in enumerate(list(self.flattened_tools.keys())):
operation_counter = 1
data = self.flattened_tools[integration_name]["data"]
des = self.flattened_tools[integration_name]["meta"]["description"] | if integration_name in CONFIG.default_knowledge.keys(): | 17 | 2023-11-03 01:20:14+00:00 | 12k |
LLaVA-VL/LLaVA-Plus-Codebase | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if al... | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 9,446 | assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
    """Mask ``attn_bias`` for prefix-LM attention.

    Positions that are neither causal (lower-triangular) nor inside the
    bidirectional prefix are filled with the dtype's minimum so softmax
    zeroes them out.

    Raises:
        ValueError: if attn_bias's last two dims are not max_seq_len, or
            prefix_mask is longer than max_seq_len.
    """
    (s_k, s_q) = attn_bias.shape[-2:]
    if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
        # Fix: the original interpolated self.config.max_length (the
        # unrelated HF generation default) into the message, while the
        # check itself is against max_seq_len.
        raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
    seq_len = prefix_mask.shape[-1]
    if seq_len > self.config.max_seq_len:
        raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
    attn_bias = attn_bias[..., :seq_len, :seq_len]
    # Allowed = causal (key index <= query index) OR key position lies in
    # the bidirectional prefix.
    causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
    prefix = prefix_mask.view(-1, 1, 1, seq_len)
    cannot_attend = ~torch.logical_or(causal, prefix.bool())
    min_val = torch.finfo(attn_bias.dtype).min
    attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
    return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
    """Forbid attention between tokens of different packed sequences.

    Pairs whose sequence ids differ are filled with the dtype minimum so
    softmax assigns them zero weight.
    """
    seq_len = sequence_id.shape[-1]
    if seq_len > self.config.max_seq_len:
        raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
    attn_bias = attn_bias[..., :seq_len, :seq_len]
    same_sequence = torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))
    cannot_attend = torch.logical_not(same_sequence).unsqueeze(1)
    min_val = torch.finfo(attn_bias.dtype).min
    return attn_bias.masked_fill(cannot_attend, min_val)
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
self._attn_bias_initialized = True
if self.attn_impl == 'flash':
return (self.attn_bias, attention_mask)
if self.attn_bias is not None:
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor)
assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name'] | MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config) | 11 | 2023-11-07 13:06:02+00:00 | 12k |
TheFunny/ArisuAutoSweeper | module/device/method/adb.py | [
{
"identifier": "Config",
"path": "module/base/decorator.py",
"snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n ... | import re
import cv2
import numpy as np
import time
from functools import wraps
from adbutils.errors import AdbError
from lxml import etree
from module.base.decorator import Config
from module.device.connection import Connection
from module.device.method.utils import (RETRY_TRIES, retry_sleep, remove_prefix, handle_adb_error,
ImageTruncated, PackageNotInstalled)
from module.exception import RequestHumanTakeover, ScriptError
from module.logger import logger | 9,565 |
def retry(func):
@wraps(func)
def retry_wrapper(self, *args, **kwargs):
"""
Args:
self (Adb):
"""
init = None
for _ in range(RETRY_TRIES):
try:
if callable(init):
retry_sleep(_)
init()
return func(self, *args, **kwargs)
# Can't handle
except RequestHumanTakeover:
break
# When adb server was killed
except ConnectionResetError as e:
logger.error(e)
def init():
self.adb_reconnect()
# AdbError
except AdbError as e:
if handle_adb_error(e):
def init():
self.adb_reconnect()
else:
break
# Package not installed
except PackageNotInstalled as e:
logger.error(e)
def init():
self.detect_package()
# ImageTruncated
|
def retry(func):
@wraps(func)
def retry_wrapper(self, *args, **kwargs):
"""
Args:
self (Adb):
"""
init = None
for _ in range(RETRY_TRIES):
try:
if callable(init):
retry_sleep(_)
init()
return func(self, *args, **kwargs)
# Can't handle
except RequestHumanTakeover:
break
# When adb server was killed
except ConnectionResetError as e:
logger.error(e)
def init():
self.adb_reconnect()
# AdbError
except AdbError as e:
if handle_adb_error(e):
def init():
self.adb_reconnect()
else:
break
# Package not installed
except PackageNotInstalled as e:
logger.error(e)
def init():
self.detect_package()
# ImageTruncated | except ImageTruncated as e: | 6 | 2023-11-01 07:09:45+00:00 | 12k |
liuzhao1225/YouDub | main.py | [
{
"identifier": "TTS_Clone",
"path": "youdub/tts_xttsv2.py",
"snippet": "class TTS_Clone:\n def __init__(self, model_path=\"tts_models/multilingual/multi-dataset/xtts_v2\", device='cuda', language='zh-cn'):\n logging.info(f'Loading TTS model {model_path}...')\n self.tts = TTS(model_path... | import os
import logging
import json
import re
import time
import numpy as np
import re
import argparse
from tqdm import tqdm
from youdub.tts_xttsv2 import TTS_Clone, audio_process_folder
from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance
from youdub.tts_bytedance import audio_process_folder as audio_process_folder_bytedance
from youdub.asr_whisperX import VideoProcessor
from youdub.video_postprocess import replace_audio_ffmpeg
from youdub.translation_unsafe import Translator
from youdub.utils import split_text
from multiprocessing import Process | 9,403 | # from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance, audio_process_folder as audio_process_folder_bytedance
allowed_chars = '[^a-zA-Z0-9_ .]'
def translate_from_folder(folder, translator: Translator, original_fname):
with open(os.path.join(folder, 'en.json'), mode='r', encoding='utf-8') as f:
transcript = json.load(f)
_transcript = [sentence['text'] for sentence in transcript if sentence['text']]
result = ['']
while len(result) != len(_transcript):
result, summary = translator.translate(_transcript, original_fname)
for i, sentence in enumerate(result):
transcript[i]['text'] = sentence
transcript = split_text(transcript) # 使用whisperX后,会自动分句,所以不再需要手动分句。同时避免了将`“你好。”`分为`“你好。`和`”`的情况
with open(os.path.join(folder, 'zh.json'), 'w', encoding='utf-8') as f:
json.dump(transcript, f, ensure_ascii=False, indent=4)
with open(os.path.join(folder, 'summary.txt'), 'w', encoding='utf-8') as f:
f.write(summary)
# def main(input_folder, output_folder, diarize=False):
def main():
parser = argparse.ArgumentParser(description='Process some videos.')
parser.add_argument('--input_folders', type=str, nargs='+', required=True,
help='The list of input folders containing the videos')
parser.add_argument('--output_folders', type=str, nargs='+', required=True, help='The list of output folders where the processed videos will be stored')
parser.add_argument('--vocal_only_folders', type=str, nargs='+', default=[],
help='The list of input folders containing the videos that only need vocal for the final result.')
parser.add_argument('--diarize', action='store_true',
help='Enable diarization')
args = parser.parse_args()
if len(args.input_folders) != len(args.output_folders):
raise ValueError(
"The number of input folders must match the number of output folders.")
print('='*50)
print('Initializing...')
if args.diarize:
print('Diarization enabled.')
print('='*50)
diarize = args.diarize
processor = VideoProcessor(diarize=diarize)
translator = Translator()
tts = TTS_Clone()
tts_bytedance = TTS_Clone_bytedance()
for input_folder, output_folder in zip(args.input_folders, args.output_folders):
if input_folder in args.vocal_only_folders:
vocal_only = True
print(f'Vocal only mode enabled for {input_folder}.')
else:
vocal_only = False
if not os.path.exists(os.path.join(input_folder, '0_finished')):
os.makedirs(os.path.join(input_folder, '0_finished'))
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if not os.path.exists(os.path.join(output_folder, '0_to_upload')):
os.makedirs(os.path.join(output_folder, '0_to_upload'))
if not os.path.exists(os.path.join(output_folder, '0_finished')):
os.makedirs(os.path.join(output_folder, '0_finished'))
print('='*50)
print(
f'Video processing started for {input_folder} to {output_folder}.')
print('='*50)
logging.info('Processing folder...')
files = os.listdir(input_folder)
t = tqdm(files, desc="Processing files")
video_lists = []
for file in t:
print('='*50)
t.set_description(f"Processing {file}")
print('='*50)
if file.endswith('.mp4') or file.endswith('.mkv') or file.endswith('.avi') or file.endswith('.flv'):
original_fname = file[:-4]
new_filename = re.sub(r'[^a-zA-Z0-9_. ]', '', file)
new_filename = re.sub(r'\s+', ' ', new_filename)
new_filename = new_filename.strip()
os.rename(os.path.join(input_folder, file),
os.path.join(input_folder, new_filename))
file = new_filename
video_lists.append(file)
input_path = os.path.join(input_folder, file)
output_path = os.path.join(output_folder, file[:-4]).strip()
if not os.path.exists(output_path):
os.makedirs(output_path)
speaker_to_voice_type = processor.process_video(
input_path, output_path)
else:
continue
if not os.path.exists(os.path.join(output_path, 'zh.json')):
translate_from_folder(output_path, translator, original_fname)
if len(speaker_to_voice_type) == 1:
print('Only one speaker detected. Using TTS.')
audio_process_folder_bytedance(
output_path, tts_bytedance, speaker_to_voice_type, vocal_only=vocal_only)
else:
print('Multiple speakers detected. Using XTTSv2.')
| # from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance, audio_process_folder as audio_process_folder_bytedance
allowed_chars = '[^a-zA-Z0-9_ .]'
def translate_from_folder(folder, translator: Translator, original_fname):
with open(os.path.join(folder, 'en.json'), mode='r', encoding='utf-8') as f:
transcript = json.load(f)
_transcript = [sentence['text'] for sentence in transcript if sentence['text']]
result = ['']
while len(result) != len(_transcript):
result, summary = translator.translate(_transcript, original_fname)
for i, sentence in enumerate(result):
transcript[i]['text'] = sentence
transcript = split_text(transcript) # 使用whisperX后,会自动分句,所以不再需要手动分句。同时避免了将`“你好。”`分为`“你好。`和`”`的情况
with open(os.path.join(folder, 'zh.json'), 'w', encoding='utf-8') as f:
json.dump(transcript, f, ensure_ascii=False, indent=4)
with open(os.path.join(folder, 'summary.txt'), 'w', encoding='utf-8') as f:
f.write(summary)
# def main(input_folder, output_folder, diarize=False):
def main():
parser = argparse.ArgumentParser(description='Process some videos.')
parser.add_argument('--input_folders', type=str, nargs='+', required=True,
help='The list of input folders containing the videos')
parser.add_argument('--output_folders', type=str, nargs='+', required=True, help='The list of output folders where the processed videos will be stored')
parser.add_argument('--vocal_only_folders', type=str, nargs='+', default=[],
help='The list of input folders containing the videos that only need vocal for the final result.')
parser.add_argument('--diarize', action='store_true',
help='Enable diarization')
args = parser.parse_args()
if len(args.input_folders) != len(args.output_folders):
raise ValueError(
"The number of input folders must match the number of output folders.")
print('='*50)
print('Initializing...')
if args.diarize:
print('Diarization enabled.')
print('='*50)
diarize = args.diarize
processor = VideoProcessor(diarize=diarize)
translator = Translator()
tts = TTS_Clone()
tts_bytedance = TTS_Clone_bytedance()
for input_folder, output_folder in zip(args.input_folders, args.output_folders):
if input_folder in args.vocal_only_folders:
vocal_only = True
print(f'Vocal only mode enabled for {input_folder}.')
else:
vocal_only = False
if not os.path.exists(os.path.join(input_folder, '0_finished')):
os.makedirs(os.path.join(input_folder, '0_finished'))
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if not os.path.exists(os.path.join(output_folder, '0_to_upload')):
os.makedirs(os.path.join(output_folder, '0_to_upload'))
if not os.path.exists(os.path.join(output_folder, '0_finished')):
os.makedirs(os.path.join(output_folder, '0_finished'))
print('='*50)
print(
f'Video processing started for {input_folder} to {output_folder}.')
print('='*50)
logging.info('Processing folder...')
files = os.listdir(input_folder)
t = tqdm(files, desc="Processing files")
video_lists = []
for file in t:
print('='*50)
t.set_description(f"Processing {file}")
print('='*50)
if file.endswith('.mp4') or file.endswith('.mkv') or file.endswith('.avi') or file.endswith('.flv'):
original_fname = file[:-4]
new_filename = re.sub(r'[^a-zA-Z0-9_. ]', '', file)
new_filename = re.sub(r'\s+', ' ', new_filename)
new_filename = new_filename.strip()
os.rename(os.path.join(input_folder, file),
os.path.join(input_folder, new_filename))
file = new_filename
video_lists.append(file)
input_path = os.path.join(input_folder, file)
output_path = os.path.join(output_folder, file[:-4]).strip()
if not os.path.exists(output_path):
os.makedirs(output_path)
speaker_to_voice_type = processor.process_video(
input_path, output_path)
else:
continue
if not os.path.exists(os.path.join(output_path, 'zh.json')):
translate_from_folder(output_path, translator, original_fname)
if len(speaker_to_voice_type) == 1:
print('Only one speaker detected. Using TTS.')
audio_process_folder_bytedance(
output_path, tts_bytedance, speaker_to_voice_type, vocal_only=vocal_only)
else:
print('Multiple speakers detected. Using XTTSv2.') | audio_process_folder( | 3 | 2023-11-02 08:21:31+00:00 | 12k |
BrianPugh/cyclopts | tests/test_group_extractors.py | [
{
"identifier": "App",
"path": "cyclopts/core.py",
"snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)... | import pytest
from cyclopts import App, Group, Parameter
from cyclopts.group_extractors import groups_from_app | 7,710 |
def test_groups_annotated_invalid_recursive_definition():
"""A default_parameter isn't allowed to have a group set, as it would introduce a paradox."""
default_parameter = Parameter(group="Drink") # pyright: ignore[reportGeneralTypeIssues]
with pytest.raises(ValueError):
|
def test_groups_annotated_invalid_recursive_definition():
"""A default_parameter isn't allowed to have a group set, as it would introduce a paradox."""
default_parameter = Parameter(group="Drink") # pyright: ignore[reportGeneralTypeIssues]
with pytest.raises(ValueError): | Group("Food", default_parameter=default_parameter) | 1 | 2023-11-03 02:24:25+00:00 | 12k |
RoboFlamingo/RoboFlamingo | robot_flamingo/data/data.py | [
{
"identifier": "RealDatasetHDF5",
"path": "robot_flamingo/data/real_dataset_hdf5.py",
"snippet": "class RealDatasetHDF5(Dataset):\n def __init__(self,\n data_dir,\n image_fn,\n text_fn,\n seq_len=12,\n mode='train',\n ... | import ast
import functools
import io
import json
import logging
import math
import os
import random
import sys
import tarfile
import zipfile
import braceexpand
import torch
import torchvision
import webdataset as wds
import numpy as np
import numpy as np
import pyhash
import torch
import horovod.torch as hvd
import logging
import numpy as np
import pyhash
import torch
import pickle
import torch.nn as nn
import torch.nn.functional as F
import copy
from cgitb import text
from dataclasses import dataclass
from multiprocessing import Value
from PIL import Image
from torch.utils.data import DataLoader, IterableDataset, get_worker_info, Dataset
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import (
base_plus_ext,
tar_file_expander,
url_opener,
valid_sample,
)
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
process_language,
process_rgb,
process_state,
)
from omegaconf import DictConfig
from torch.utils.data import Dataset
from robot_flamingo.data.real_dataset_hdf5 import RealDatasetHDF5
from pathlib import Path
from typing import Dict, Tuple, Union
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
# process_language,
# process_rgb,
process_state,
)
from omegaconf import DictConfig
from torch.utils.data import Dataset
from robot_flamingo.data.vl_dataset import CaptionDataset, VQADataset
from typing import Any, Dict, List, Tuple, Callable
from itertools import chain
from calvin_agent.datasets.utils.episode_utils import lookup_naming_pattern | 9,128 | sampler: DistributedSampler = None
shared_epoch: SharedEpoch = None
dataset: Dataset = None
def set_epoch(self, epoch):
if self.shared_epoch is not None:
self.shared_epoch.set_value(epoch)
if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
self.sampler.set_epoch(epoch)
def preprocess_image(sample, image_processor):
image = [image_processor(s).unsqueeze(0) for s in sample]
image = torch.cat(image, dim=0)
# apply random horizontal flip and color jitter
return image
def preprocess_text_calvin(sample, tokenizer):
tokenizer.padding_side = "right"
sample = [
# (f"{s.strip()}{tokenizer.eos_token}")
# for s in sample
(f"<image>{s.strip()}<|endofchunk|>{tokenizer.eos_token}") for s in sample
]
text = tokenizer(
sample,
max_length=32,
padding="longest",
truncation="only_first",
return_tensors="pt",
)
return text["input_ids"], text["attention_mask"]
def preprocess_interleaved(sample, tokenizer, clip_processor, sim_threshold):
    """Convert one interleaved web-document sample into model inputs.

    sample[0] is a JSON metadata string, sample[1] the raw bytes of a tar of
    images. Returns (stacked image tensors, (input_ids, attention_mask)).
    Raises ValueError to signal the sample should be dropped.
    """
    info = json.loads(sample[0])
    tar_file_obj = io.BytesIO(sample[1])
    image_tar = tarfile.open(fileobj=tar_file_obj)
    sentences = info["text_list"]
    images, image_idxs = [], []
    for image_path, sim in zip(info["image_info"], info["similarity_matrix"]):
        # pick one image per sentence: skip if that sentence already got one
        if info["image_info"][image_path]["matched_text_index"] in image_idxs:
            continue
        rawbytes = image_tar.extractfile(
            os.path.join(image_tar.getnames()[0], image_path)
        ).read()
        # filter to images >= 10KB
        if len(rawbytes) // 1000 <= MIN_KB:
            continue
        # drop images weakly aligned with their matched sentence
        if sim[info["image_info"][image_path]["matched_text_index"]] < sim_threshold:
            continue
        image = Image.open(io.BytesIO(rawbytes)).convert("RGB")
        images.append(image)
        image_idxs.append(info["image_info"][image_path]["matched_text_index"])
    if len(images) == 0:
        raise ValueError("No images in sample")
    # filter out images that are exact duplicates
    images_tensors = preprocess_image(images, clip_processor)
    # keep at most MAX_NUM_IMAGES images (in original order)
    keep_ixs = range(min(len(images_tensors), MAX_NUM_IMAGES))
    images_tensors = images_tensors[keep_ixs]
    image_idxs = [image_idxs[ix] for ix in keep_ixs]
    # pad to 5 images with all-zero frames so every sample has a fixed shape
    if len(images_tensors) < MAX_NUM_IMAGES:
        zero_padding = torch.zeros(
            (MAX_NUM_IMAGES - len(images_tensors), 3, 224, 224), dtype=torch.float
        )
        images_tensors = torch.cat((images_tensors, zero_padding), dim=0)
    # add in <image> and <eoc> tokens
    # eoc after sentence = "sentence loss"
    for ix in image_idxs:
        sentences[ix] = f"<|endofchunk|><image>{sentences[ix]}"
    text = " ".join(sentences)
    text = text.replace("<|endofchunk|>", "", 1)  # but remove first eoc
    # whitespace cleanup around the special markers
    text = (
        text.replace(" <|endofchunk|>", "<|endofchunk|>")
        .replace("<image> ", "<image>")
        .replace(" <image>", "<image>")
    )
    text = f"{text}<|endofchunk|>{tokenizer.eos_token}"
    tokenizer.padding_side = "right"
    text_tensor = tokenizer(
        text, max_length=256, truncation=True, padding="max_length", return_tensors="pt"
    )
    # reject sequences with too few images (after truncation)
    num_images = torch.count_nonzero(
        text_tensor["input_ids"]
        == tokenizer.additional_special_tokens_ids[
            tokenizer.additional_special_tokens.index("<image>")
        ]
    )
    if num_images == 0:
        raise ValueError("No images in sample")
    elif (
        num_images == 1 and random.random() <= 0.5
    ):  # 50% chance of keeping single image samples
        raise ValueError("Only one image in sample")
    return (
        images_tensors,
        (text_tensor["input_ids"], text_tensor["attention_mask"]),
    )
def get_coco_dataset(args, image_processor, tokenizer, epoch=0):
coco_data_dir = "path/to/coco/train2014"
coco_ann = "path/to/coco/annotations/captions_train2014.json"
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
|
# Disable PIL's decompression-bomb guard for very large web images.
Image.MAX_IMAGE_PIXELS = 1000000000
MAX_NUM_TOKENS = 256
MAX_NUM_IMAGES = 5
TINY_IMAGE_SIZE_THRESHOLD = 1
N_CHANNELS = 3
INTERLEAVED_IMAGE_SIZE = 224
# Webdataset shuffle-buffer sizes (shard level vs. sample level).
_SHARD_SHUFFLE_SIZE = 2000
_SHARD_SHUFFLE_INITIAL = 500
_SAMPLE_SHUFFLE_SIZE = 5000
_SAMPLE_SHUFFLE_INITIAL = 1000
# Minimum image size in KB accepted by preprocess_interleaved.
MIN_KB = 10
# NOTE(review): duplicate assignment — MAX_NUM_IMAGES is already set to 5 above.
MAX_NUM_IMAGES = 5
# Horovod is an optional distributed-training backend; fall back to None when
# it is not installed so single-process runs still work. (The import body was
# missing, which left an empty `try:` block — a syntax error.)
try:
    import horovod.torch as hvd
except ImportError:
    hvd = None
# Fast non-cryptographic hash used for deterministic validation window sizes.
hasher = pyhash.fnv1_32()
logger = logging.getLogger(__name__)
# Which observation keys the CALVIN episode dicts expose to the model.
obs_config = DictConfig(
    {
        "rgb_obs": ["rgb_static", "rgb_gripper"],
        "depth_obs": [],
        "state_obs": ["robot_obs"],
        "actions": ["rel_actions"],
        "language": ["language"],
    }
)
# Shape/normalization settings for the proprioceptive robot state vector.
prop_state = DictConfig(
    {
        "n_state_obs": 15,
        "keep_indices": [[0, 15]],
        "robot_orientation_idx": [3, 6],
        "normalize": True,
        "normalize_robot_orientation": True,
    }
)
def get_validation_window_size(
    idx: int, min_window_size: int, max_window_size: int
) -> int:
    """
    Deterministically choose a window size for validation sequence *idx*.

    A hash of the index stands in for random sampling so the same sequence
    gets the same window size in every epoch.
    Args:
        idx: Sequence index.
        min_window_size: Minimum window size.
        max_window_size: Maximum window size.
    Returns:
        Window size in [min_window_size, max_window_size].
    """
    span = max_window_size - min_window_size + 1
    return min_window_size + hasher(str(idx)) % span
class RandomShiftsAug(nn.Module):
    """Random pixel-shift image augmentation: replicate-pad by `pad` pixels,
    then resample each (square) image on a randomly shifted sampling grid."""
    def __init__(self, pad):
        super().__init__()
        self.pad = pad
    def forward(self, x):
        # x: (N, C, H, W); images must be square (asserted below).
        n, c, h, w = x.size()
        assert h == w
        padding = tuple([self.pad] * 4)
        x = F.pad(x, padding, 'replicate')
        # One padded pixel expressed in normalized grid coordinates.
        eps = 1.0 / (h + 2 * self.pad)
        arange = torch.linspace(-1.0 + eps,
                                1.0 - eps,
                                h + 2 * self.pad,
                                device=x.device,
                                dtype=x.dtype)[:h]
        arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
        base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
        base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
        # One random integer shift in [0, 2*pad] per sample, rescaled to grid units.
        shift = torch.randint(0,
                              2 * self.pad + 1,
                              size=(n, 1, 1, 2),
                              device=x.device,
                              dtype=x.dtype)
        shift *= 2.0 / (h + 2 * self.pad)
        grid = base_grid + shift
        return F.grid_sample(x, grid, padding_mode='zeros', align_corners=False)
    def forward_traj(self, x):
        # x: (N, T, C, H, W); frames are flattened to (N*T, C, H, W) and each
        # frame draws its own shift.
        n, t, c, h, w = x.size()
        x = x.view(n*t, *x.shape[2:])
        assert h == w
        padding = tuple([self.pad] * 4)
        x = F.pad(x, padding, 'replicate')
        eps = 1.0 / (h + 2 * self.pad)
        arange = torch.linspace(-1.0 + eps,
                                1.0 - eps,
                                h + 2 * self.pad,
                                device=x.device,
                                dtype=x.dtype)[:h]
        arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
        base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
        base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
        base_grid = base_grid.unsqueeze(1).repeat(1, t, 1, 1, 1)
        base_grid = base_grid.view(n*t, *base_grid.shape[2:])
        # NOTE(review): lower bound is 1 here but 0 in forward() — looks
        # inconsistent; confirm whether a zero shift should be allowed.
        shift = torch.randint(1,
                              2 * self.pad + 1,
                              size=(n*t, 1, 1, 2),
                              device=x.device,
                              dtype=x.dtype)
        shift *= 2.0 / (h + 2 * self.pad)
        grid = base_grid + shift
        x = F.grid_sample(x, grid, padding_mode='zeros', align_corners=False)
        x = x.view(n, t, *x.shape[1:])
        return x
class BaseCalvinDataset(Dataset):
    """
    Abstract dataset base class.
    Args:
        datasets_dir: Path of folder containing episode files (string must contain 'validation' or 'training').
        obs_space: DictConfig of observation space.
        proprio_state: DictConfig with shape of prioprioceptive state.
        key: 'vis' or 'lang'.
        lang_folder: Name of the subdirectory of the dataset containing the language annotations.
        num_workers: Number of dataloading workers for this dataset.
        transforms: Dict with pytorch data transforms.
        batch_size: Batch size.
        min_window_size: Minimum window length of loaded sequences.
        max_window_size: Maximum window length of loaded sequences.
        pad: If True, repeat last frame such that all sequences have length 'max_window_size'.
        aux_lang_loss_window: How many sliding windows to consider for auxiliary language losses, counted from the end
        of an annotated language episode.
    """
    def __init__(
        self,
        datasets_dir: Path,
        proprio_state: DictConfig = prop_state,
        lang_folder: str = "lang_annotations",
        num_workers: int = 0,
        key: str = "lang",
        obs_space: DictConfig = obs_config,
        transforms: Dict = {},  # NOTE(review): mutable default — shared across instances if mutated
        batch_size: int = 32,
        window_size: int = 16,
        min_window_size: int = 16,
        max_window_size: int = 16,
        pad: bool = True,
        aux_lang_loss_window: int = 1,
        rgb_pad=-1,
        gripper_pad=-1,
        traj_cons=False,
        text_aug=False,
        dif_ws=False,
        act_step=1
    ):
        self.observation_space = obs_space
        self.proprio_state = proprio_state
        self.transforms = transforms
        self.with_lang = key == "lang"
        self.relative_actions = "rel_actions" in self.observation_space["actions"]
        self.pad = pad
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.window_size = window_size
        # Unless dif_ws is set, window bounds are derived from window_size so
        # that act_step extra future actions fit inside each loaded sequence.
        if not dif_ws:
            self.min_window_size = window_size + act_step - 1
            self.max_window_size = window_size + act_step - 1
        else:
            self.min_window_size = min_window_size
            self.max_window_size = max_window_size
        self.act_step = act_step
        # print('ws {}, min_ws {}, max_ws {}'.format(self.window_size, self.max_window_size, self.min_window_size))
        self.abs_datasets_dir = datasets_dir
        self.lang_folder = lang_folder  # if self.with_lang else None
        self.aux_lang_loss_window = aux_lang_loss_window
        self.traj_cons = traj_cons
        # NOTE(review): hardcoded absolute path — breaks outside the original
        # training machine; consider making this a parameter.
        with open('/mnt/bn/robotics/lxh/robot-flamingo/enrich_lang_annotations.json', 'r') as f:
            self.enrich_lang = json.load(f)
        self.text_aug = text_aug
        self.rgb_pad = rgb_pad
        if self.rgb_pad != -1:
            self.rgb_shift = RandomShiftsAug(rgb_pad)
        self.gripper_pad = gripper_pad
        if self.gripper_pad != -1:
            self.gripper_shift = RandomShiftsAug(gripper_pad)
        # The split (training/validation) is inferred from the directory name.
        assert (
            "validation" in self.abs_datasets_dir.as_posix()
            or "training" in self.abs_datasets_dir.as_posix()
        )
        self.validation = "validation" in self.abs_datasets_dir.as_posix()
        assert self.abs_datasets_dir.is_dir()
        logger.info(f"loading dataset at {self.abs_datasets_dir}")
        logger.info("finished loading dataset")
    def process_rgb(
        self,
        episode: Dict[str, np.ndarray],
        observation_space: DictConfig,
        transforms: Dict,
        seq_idx: int = 0,
        window_size: int = 0,
    ) -> Dict[str, Dict[str, torch.Tensor]]:
        # Convert each configured RGB stream to byte tensors, optionally
        # slicing a [seq_idx, seq_idx + window_size) window and applying
        # the per-key transform.
        rgb_obs_keys = observation_space["rgb_obs"]
        seq_rgb_obs_dict = {}
        for _, rgb_obs_key in enumerate(rgb_obs_keys):
            rgb_obs = episode[rgb_obs_key]
            # expand dims for single environment obs
            if len(rgb_obs.shape) != 4:
                rgb_obs = np.expand_dims(rgb_obs, axis=0)
            assert len(rgb_obs.shape) == 4
            if window_size == 0 and seq_idx == 0:  # single file loader
                # To Square image
                seq_rgb_obs_ = torch.from_numpy(rgb_obs).byte()
            else:  # episode loader
                seq_rgb_obs_ = torch.from_numpy(
                    rgb_obs[seq_idx : seq_idx + window_size]
                ).byte()
            if rgb_obs_key in transforms:
                seq_rgb_obs_ = transforms[rgb_obs_key](seq_rgb_obs_)
            seq_rgb_obs_dict[rgb_obs_key] = seq_rgb_obs_
        # shape: N_rgb_obs x (BxHxWxC)
        return {"rgb_obs": seq_rgb_obs_dict}
    def process_language(
        self, episode: Dict[str, np.ndarray], transforms: Dict, with_lang: bool
    ):
        # Language is passed through untokenized; collater tokenizes later.
        return {"lang": episode["language"]}
    def __getitem__(self, idx: Union[int, Tuple[int, int]], fixed_seed=False) -> Dict:
        """
        Get sequence of dataset.
        Args:
            idx: Index of the sequence.
        Returns:
            Loaded sequence.
        """
        if isinstance(idx, int):
            # When max_ws_size and min_ws_size are equal, avoid unnecessary padding
            # acts like Constant dataset. Currently, used for language data
            if self.min_window_size == self.max_window_size:
                window_size = self.max_window_size
            elif self.min_window_size < self.max_window_size:
                window_size = self._get_window_size(idx)
            else:
                logger.error(
                    f"min_window_size {self.min_window_size} > max_window_size {self.max_window_size}"
                )
                raise ValueError
        else:
            # A (index, window_size) tuple overrides the sampled window size.
            idx, window_size = idx
        head = False
        sequence = self._get_sequences(idx, window_size, head=head)
        if self.pad:
            pad_size = self._get_pad_size(sequence)
            sequence = self._pad_sequence(sequence, pad_size, head=head)
        # Convert the RGB tensors back into lists of PIL images, since the
        # collater's image_fn expects PIL inputs.
        new_list = []
        np_rgb = copy.deepcopy(sequence["rgb_obs"]["rgb_static"].numpy())
        for i in range(np_rgb.shape[0]):
            new_list.append(Image.fromarray(np_rgb[i, :, :, :].astype(np.uint8)))
        sequence["rgb_obs"]["rgb_static"] = new_list
        new_list = []
        np_gripper = copy.deepcopy(sequence["rgb_obs"]["rgb_gripper"].numpy())
        for i in range(np_gripper.shape[0]):
            new_list.append(Image.fromarray(np_gripper[i, :, :, :].astype(np.uint8)))
        sequence["rgb_obs"]["rgb_gripper"] = new_list
        # print(pad_size, len(new_list))
        return sequence
    def _get_sequences(self, idx: int, window_size: int, head: bool=False) -> Dict:
        """
        Load sequence of length window_size.
        Args:
            idx: Index of starting frame.
            window_size: Length of sampled episode.
        Returns:
            dict: Dictionary of tensors of loaded sequence with different input modalities and actions.
        """
        episode = self._load_episode(idx, window_size)
        seq_state_obs = process_state(
            episode, self.observation_space, self.transforms, self.proprio_state
        )
        seq_rgb_obs = self.process_rgb(episode, self.observation_space, self.transforms)
        seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
        seq_acts = process_actions(episode, self.observation_space, self.transforms)
        info = get_state_info_dict(episode)
        seq_lang = self.process_language(episode, self.transforms, self.with_lang)
        info = self._add_language_info(info, idx)
        seq_dict = {
            **seq_state_obs,
            **seq_rgb_obs,
            **seq_depth_obs,
            **seq_acts,
            **info,
            **seq_lang,
        }  # type:ignore
        seq_dict["idx"] = idx  # type:ignore
        return seq_dict
    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        # Subclasses implement the actual on-disk loading.
        raise NotImplementedError
    def _get_window_size(self, idx: int) -> int:
        """
        Sample a window size taking into account the episode limits.
        Args:
            idx: Index of the sequence to load.
        Returns:
            Window size.
        """
        window_diff = self.max_window_size - self.min_window_size
        if len(self.episode_lookup) <= idx + window_diff:
            # last episode
            max_window = self.min_window_size + len(self.episode_lookup) - idx - 1
        elif (
            self.episode_lookup[idx + window_diff]
            != self.episode_lookup[idx] + window_diff
        ):
            # less than max_episode steps until next episode
            steps_to_next_episode = int(
                np.nonzero(
                    self.episode_lookup[idx : idx + window_diff + 1]
                    - (self.episode_lookup[idx] + np.arange(window_diff + 1))
                )[0][0]
            )
            max_window = min(
                self.max_window_size, (self.min_window_size + steps_to_next_episode - 1)
            )
        else:
            max_window = self.max_window_size
        if self.validation:
            # in validation step, repeat the window sizes for each epoch.
            return get_validation_window_size(idx, self.min_window_size, max_window)
        else:
            return np.random.randint(self.min_window_size, max_window + 1)
    def __len__(self) -> int:
        """
        Returns:
            Size of the dataset.
        """
        return len(self.episode_lookup)
    def _get_pad_size(self, sequence: Dict) -> int:
        """
        Determine how many frames to append to end of the sequence
        Args:
            sequence: Loaded sequence.
        Returns:
            Number of frames to pad.
        """
        return self.max_window_size - len(sequence["actions"])
    def _pad_sequence(self, seq: Dict, pad_size: int, head: bool=False) -> Dict:
        """
        Pad a sequence by repeating the last frame.
        Args:
            seq: Sequence to pad.
            pad_size: Number of frames to pad.
        Returns:
            Padded sequence.
        """
        seq.update({"robot_obs": self._pad_with_repetition(seq["robot_obs"], pad_size)})
        seq.update(
            {
                "rgb_obs": {
                    k: self._pad_with_repetition(v, pad_size, head)
                    for k, v in seq["rgb_obs"].items()
                }
            }
        )
        seq.update(
            {
                "depth_obs": {
                    k: self._pad_with_repetition(v, pad_size, head)
                    for k, v in seq["depth_obs"].items()
                }
            }
        )
        # todo: find better way of distinguishing rk and play action spaces
        if not self.relative_actions:
            if head:
                # NOTE(review): seq_acts computed here is never stored back
                # into seq — looks like a dropped update; confirm head path.
                seq_acts = self._pad_with_zeros(seq["actions"], pad_size, head)
            else:
                # repeat action for world coordinates action space
                seq.update({"actions": self._pad_with_repetition(seq["actions"], pad_size, head)})
        else:
            # for relative actions zero pad all but the last action dims and repeat last action dim (gripper action)
            if head:
                seq_acts = self._pad_with_zeros(seq["actions"], pad_size, head)
            else:
                seq_acts = torch.cat(
                    [
                        self._pad_with_zeros(seq["actions"][..., :-1], pad_size, head),
                        self._pad_with_repetition(seq["actions"][..., -1:], pad_size, head),
                    ],
                    dim=-1,
                )
            seq.update({"actions": seq_acts})
        seq.update(
            {
                "state_info": {
                    k: self._pad_with_repetition(v, pad_size, head)
                    for k, v in seq["state_info"].items()
                }
            }
        )
        return seq
    @staticmethod
    def _pad_with_repetition(input_tensor: torch.Tensor, pad_size: int, head: bool = False) -> torch.Tensor:
        """
        Pad a sequence Tensor by repeating last element pad_size times.
        Args:
            input_tensor: Sequence to pad.
            pad_size: Number of frames to pad.
            head: If True, pad at the front by repeating the first element instead.
        Returns:
            Padded Tensor.
        """
        if head:
            last_repeated = torch.repeat_interleave(
                torch.unsqueeze(input_tensor[0], dim=0), repeats=pad_size, dim=0
            )
            padded = torch.vstack((last_repeated, input_tensor))
        else:
            last_repeated = torch.repeat_interleave(
                torch.unsqueeze(input_tensor[-1], dim=0), repeats=pad_size, dim=0
            )
            padded = torch.vstack((input_tensor, last_repeated))
        return padded
    @staticmethod
    def _pad_with_zeros(input_tensor: torch.Tensor, pad_size: int, head: bool = False) -> torch.Tensor:
        """
        Pad a Tensor with zeros.
        Args:
            input_tensor: Sequence to pad.
            pad_size: Number of frames to pad.
            head: If True, prepend the zero rows instead of appending them.
        Returns:
            Padded Tensor.
        """
        zeros_repeated = torch.repeat_interleave(
            torch.unsqueeze(torch.zeros(input_tensor.shape[-1]), dim=0),
            repeats=pad_size,
            dim=0,
        )
        if head:
            padded = torch.vstack((zeros_repeated, input_tensor))
        else:
            padded = torch.vstack((input_tensor, zeros_repeated))
        return padded
    def _add_language_info(self, info: Dict, idx: int) -> Dict:
        """
        If dataset contains language, add info to determine if this sequence will be used for the auxiliary losses.
        Args:
            info: Info dictionary.
            idx: Sequence index.
        Returns:
            Info dictionary with updated information.
        """
        if not self.with_lang:
            return info
        # True when this window lies within the last aux_lang_loss_window
        # windows of its annotated language episode.
        use_for_aux_lang_loss = (
            idx + self.aux_lang_loss_window >= len(self.lang_lookup)
            or self.lang_lookup[idx] < self.lang_lookup[idx + self.aux_lang_loss_window]
        )
        info["use_for_aux_lang_loss"] = use_for_aux_lang_loss
        return info
class DebugDataset(Dataset):
    """Synthetic dataset of random tensors for smoke-testing dataloaders.

    Each item is a tuple (rgb, gripper, state) with a fixed window of 8
    frames; shapes mimic the CALVIN observation streams.
    """
    def __init__(self, **kwargs: Any,):
        super().__init__()
    def __len__(self) -> int:
        return 10000
    def __getitem__(self, index):
        window_size = 8
        rgb = torch.randn(window_size, 3, 200, 200)
        gripper = torch.randn(window_size, 84, 84)
        state = torch.randn(window_size, 15)
        # Bug fix: the sample tensors were built but never returned, so every
        # item silently loaded as None.
        return rgb, gripper, state
class DiskCalvinDataset(BaseCalvinDataset):
    """
    Dataset that loads episodes as individual files from disk.
    Args:
        skip_frames: Skip this amount of windows for language dataset.
        save_format: File format in datasets_dir (pkl or npz).
        pretrain: Set to True when pretraining.
    """
    def __init__(
        self,
        image_fn: Callable,
        text_fn: Callable,
        *args: Any,
        skip_frames: int = 1,
        save_format: str = "npz",
        pretrain: bool = False,
        partial_data=False,
        **kwargs: Any,
    ):
        super().__init__(*args, **kwargs)
        self.save_format = save_format
        self.image_fn = image_fn
        self.text_fn = text_fn
        self.partial_data = partial_data
        # Choose the frame-file loader matching the on-disk format.
        if self.save_format == "pkl":
            self.load_file = load_pkl
        elif self.save_format == "npz":
            self.load_file = load_npz
        else:
            raise NotImplementedError
        self.pretrain = pretrain
        self.skip_frames = skip_frames
        if self.with_lang:
            (
                self.episode_lookup,
                self.lang_lookup,
                self.lang_ann,
                self.lang_task
            ) = self._build_file_indices_lang(self.abs_datasets_dir)
        else:
            self.episode_lookup = self._build_file_indices(self.abs_datasets_dir)
        # e.g. ("episode_", ".npz") plus the zero-padded index width.
        self.naming_pattern, self.n_digits = lookup_naming_pattern(
            self.abs_datasets_dir, self.save_format
        )
    def _get_episode_name(self, file_idx: int) -> Path:
        """
        Convert file idx to file path.
        Args:
            file_idx: index of starting frame.
        Returns:
            Path to file.
        """
        return Path(
            f"{self.naming_pattern[0]}{file_idx:0{self.n_digits}d}{self.naming_pattern[1]}"
        )
    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive frames saved as individual files on disk and combine to episode dict.
        Args:
            idx: Index of first frame.
            window_size: Length of sampled episode.
        Returns:
            episode: Dict of numpy arrays containing the episode where keys are the names of modalities.
        """
        start_idx = self.episode_lookup[idx]
        end_idx = start_idx + window_size
        keys = list(chain(*self.observation_space.values()))
        keys.remove("language")
        keys.append("scene_obs")
        episodes = [
            self.load_file(self._get_episode_name(file_idx))
            for file_idx in range(start_idx, end_idx)
        ]
        # Stack per-frame arrays into (window_size, ...) arrays per modality.
        episode = {key: np.stack([ep[key] for ep in episodes]) for key in keys}
        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]]
            if self.text_aug:
                # Randomly swap in an enriched paraphrase of the instruction.
                task = self.lang_task[self.lang_lookup[idx]]
                enrich_lang = random.choice(self.enrich_lang[task] + [episode["language"]])
                episode["language"] = enrich_lang
        return episode
    def _build_file_indices_lang(
        self, abs_datasets_dir: Path
    ):
        """
        This method builds the mapping from index to file_name used for loading the episodes of the language dataset.
        Args:
            abs_datasets_dir: Absolute path of the directory containing the dataset.
        Returns:
            episode_lookup: Mapping from training example index to episode (file) index.
            lang_lookup: Mapping from training example to index of language instruction.
            lang_ann: Language embeddings.
        """
        assert abs_datasets_dir.is_dir()
        episode_lookup = []
        try:
            print(
                "trying to load lang data from: ",
                abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy",
            )
            lang_data = np.load(
                abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy",
                allow_pickle=True,
            ).item()
        except Exception:
            # Fall back to annotations at the dataset root.
            print(
                "Exception, trying to load lang data from: ",
                abs_datasets_dir / "auto_lang_ann.npy",
            )
            lang_data = np.load(
                abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True
            ).item()
        ep_start_end_ids = lang_data["info"]["indx"]  # each of them are 64
        lang_ann = lang_data["language"]["ann"]  # length total number of annotations
        lang_task = lang_data["language"]["task"]
        lang_lookup = []
        partial_st_ed_list = load_partial_traj_data()
        for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
            if self.partial_data:
                if (start_idx, end_idx) not in partial_st_ed_list:
                    continue
            if self.pretrain:
                start_idx = max(
                    start_idx,
                    end_idx + 1 - self.min_window_size - self.aux_lang_loss_window,
                )
            assert end_idx >= self.max_window_size
            cnt = 0
            # Enumerate every valid window start, keeping one in skip_frames.
            for idx in range(start_idx, end_idx + 1 - self.min_window_size):
                if cnt % self.skip_frames == 0:
                    lang_lookup.append(i)
                    episode_lookup.append(idx)
                cnt += 1
        return np.array(episode_lookup), lang_lookup, lang_ann, lang_task
    def _build_file_indices(self, abs_datasets_dir: Path) -> np.ndarray:
        """
        This method builds the mapping from index to file_name used for loading the episodes of the non language
        dataset.
        Args:
            abs_datasets_dir: Absolute path of the directory containing the dataset.
        Returns:
            episode_lookup: Mapping from training example index to episode (file) index.
        """
        assert abs_datasets_dir.is_dir()
        episode_lookup = []
        ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
        logger.info(
            f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.'
        )
        for start_idx, end_idx in ep_start_end_ids:
            assert end_idx > self.max_window_size
            for idx in range(start_idx, end_idx + 1 - self.min_window_size):
                episode_lookup.append(idx)
        return np.array(episode_lookup)
    def collater(self, sample):
        # Batch the per-sample dicts into stacked tensors and tokenized text.
        action_tensors = torch.from_numpy(np.array([np.stack(s["actions"]) for s in sample]))
        state_tensors = torch.from_numpy(np.array([np.stack(s["robot_obs"]) for s in sample]))
        image_tensors = torch.stack([self.image_fn(s["rgb_obs"]["rgb_static"]) for s in sample])
        gripper_tensors = torch.stack([self.image_fn(s["rgb_obs"]["rgb_gripper"]) for s in sample])
        stacked_language = [s["lang"] for s in sample]
        text_tensors, attention_mask = self.text_fn(stacked_language)
        # Optional random-shift augmentation on the static camera stream.
        if self.rgb_pad != -1:
            bs, seq_len = image_tensors.shape[:2]
            if self.traj_cons:
                image_tensors = self.rgb_shift.forward_traj(image_tensors)
            else:
                image_tensors = image_tensors.view(bs*seq_len, *image_tensors.shape[2:])
                image_tensors = self.rgb_shift(image_tensors)
                image_tensors = image_tensors.view(bs, seq_len, *image_tensors.shape[1:])
        # Same augmentation, independently, for the gripper camera stream.
        if self.gripper_pad != -1:
            bs, seq_len = gripper_tensors.shape[:2]
            if self.traj_cons:
                gripper_tensors = self.gripper_shift.forward_traj(gripper_tensors)
            else:
                gripper_tensors = gripper_tensors.view(bs * seq_len, *gripper_tensors.shape[2:])
                gripper_tensors = self.gripper_shift(gripper_tensors)
                gripper_tensors = gripper_tensors.view(bs, seq_len, *gripper_tensors.shape[1:])
        robot_obs = torch.zeros(1)
        # With act_step > 1, expose act_step future actions/states per window
        # step and trim the trailing frames that lack a full action horizon.
        if self.act_step != 1:
            actions = torch.zeros((action_tensors.shape[0], self.window_size, self.act_step, action_tensors.shape[-1]))
            for b in range(action_tensors.shape[0]):
                for ix in range(self.window_size):
                    actions[b, ix] = action_tensors[b, ix:ix+self.act_step]
            robot_obs = torch.zeros((action_tensors.shape[0], self.window_size, self.act_step, state_tensors.shape[-1]))
            for b in range(action_tensors.shape[0]):
                for ix in range(self.window_size):
                    robot_obs[b, ix] = state_tensors[b, ix:ix+self.act_step]
            # keep arm pose (first 6 dims) plus the gripper state (last dim)
            robot_obs = torch.cat([robot_obs[..., :6], robot_obs[..., [-1]]], dim=-1)
            action_tensors = actions
            image_tensors = image_tensors[:, :-(self.act_step-1)]
            gripper_tensors = gripper_tensors[:, :-(self.act_step-1)]
            state_tensors = state_tensors[:, :-(self.act_step-1)]
        return image_tensors, (text_tensors, attention_mask), action_tensors, gripper_tensors, state_tensors, robot_obs
class CalvinDataset(Dataset):
    """Naive implementation of dataset to store
    calvin debug dataset, may be changed to WDS for the full dataset
    """
    def __init__(self, image_fn, text_fn, dataset_path, is_train=True) -> None:
        super().__init__()
        self.dataset_path = dataset_path
        self.image_fn = image_fn
        self.text_fn = text_fn
        tag = "training" if is_train else "validation"
        self.file_prefix = f"{self.dataset_path}/{tag}"
        # auto_lang_ann.npy holds a pickled dict of annotations + episode ranges.
        self.anns = np.load(
            f"{self.file_prefix}/lang_annotations/auto_lang_ann.npy", allow_pickle=True
        ).item()
    def __len__(self):
        return len(self.anns["info"]["indx"])
    def __getitem__(self, index):
        # NOTE(review): `task` and `rgb_gripper` are computed but unused here.
        task = self.anns["language"]["task"][index]
        text = self.anns["language"]["ann"][index]
        st, ed = self.anns["info"]["indx"][index]
        # CJ: randomly sample a datapoint in the episode
        frame = random.randint(st, ed)
        frame = np.load(
            f"{self.file_prefix}/episode_{frame:07d}.npz"
        )  # , allow_pickle=True (lazy load)
        rgb_static = Image.fromarray(frame["rgb_static"])
        rgb_gripper = Image.fromarray(frame["rgb_gripper"])
        actions = np.array(frame["rel_actions"])
        # map the gripper channel(s) from {-1, 1} to {0, 1}
        actions[..., 6:] = (actions[..., 6:] + 1) // 2
        return rgb_static, text, actions
    def collater(self, sample):
        # Batch tuples of (image, text, action) into model-ready tensors.
        images = [s[0] for s in sample]
        texts = [s[1] for s in sample]
        actions = [s[2] for s in sample]
        image_tensors = self.image_fn(images)
        text_tensors = self.text_fn(texts)
        action_tensors = torch.FloatTensor(np.stack(actions))
        return image_tensors, text_tensors, action_tensors
def load_pkl(filename: Path) -> Dict[str, np.ndarray]:
    """Deserialize one pickled episode dict from *filename*."""
    with filename.open("rb") as file_obj:
        return pickle.load(file_obj)
def load_npz(filename: Path) -> Dict[str, np.ndarray]:
    """Open an .npz episode archive; member arrays load lazily on key access."""
    posix_path = filename.as_posix()
    return np.load(posix_path)
class SharedEpoch:
    """Epoch counter held in shared memory, so dataloader worker processes
    observe epoch updates made by the training process."""
    def __init__(self, epoch: int = 0):
        # "i" -> a signed C int shared across processes.
        self.shared_epoch = Value("i", epoch)
    def set_value(self, epoch):
        self.shared_epoch.value = epoch
    def get_value(self):
        return self.shared_epoch.value
@dataclass
class DataInfo:
    # Bundles a dataloader with its optional distributed sampler, a
    # cross-process epoch counter, and the underlying dataset.
    dataloader: DataLoader
    sampler: DistributedSampler = None
    shared_epoch: SharedEpoch = None
    dataset: Dataset = None
    def set_epoch(self, epoch):
        # Propagate the epoch to the shared counter (seen by workers) and to
        # the DistributedSampler (controls per-epoch shuffling across ranks).
        if self.shared_epoch is not None:
            self.shared_epoch.set_value(epoch)
        if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
            self.sampler.set_epoch(epoch)
def preprocess_image(sample, image_processor):
    """Run *image_processor* over every image and concatenate the results
    into a single batch tensor along dim 0."""
    batch = [image_processor(entry).unsqueeze(0) for entry in sample]
    return torch.cat(batch, dim=0)
def preprocess_text_calvin(sample, tokenizer):
    """Decorate each instruction with the <image>/<|endofchunk|> markers plus
    the EOS token, then tokenize the batch (right padding).

    Returns the tokenizer's (input_ids, attention_mask).
    """
    tokenizer.padding_side = "right"
    decorated = [
        f"<image>{instr.strip()}<|endofchunk|>{tokenizer.eos_token}"
        for instr in sample
    ]
    tokenized = tokenizer(
        decorated,
        max_length=32,
        padding="longest",
        truncation="only_first",
        return_tensors="pt",
    )
    return tokenized["input_ids"], tokenized["attention_mask"]
def preprocess_interleaved(sample, tokenizer, clip_processor, sim_threshold):
    """Convert one interleaved web-document sample into model inputs.

    sample[0] is a JSON metadata string, sample[1] the raw bytes of a tar of
    images. Returns (stacked image tensors, (input_ids, attention_mask)).
    Raises ValueError to signal the sample should be dropped.
    """
    info = json.loads(sample[0])
    tar_file_obj = io.BytesIO(sample[1])
    image_tar = tarfile.open(fileobj=tar_file_obj)
    sentences = info["text_list"]
    images, image_idxs = [], []
    for image_path, sim in zip(info["image_info"], info["similarity_matrix"]):
        # pick one image per sentence: skip if that sentence already got one
        if info["image_info"][image_path]["matched_text_index"] in image_idxs:
            continue
        rawbytes = image_tar.extractfile(
            os.path.join(image_tar.getnames()[0], image_path)
        ).read()
        # filter to images >= 10KB
        if len(rawbytes) // 1000 <= MIN_KB:
            continue
        # drop images weakly aligned with their matched sentence
        if sim[info["image_info"][image_path]["matched_text_index"]] < sim_threshold:
            continue
        image = Image.open(io.BytesIO(rawbytes)).convert("RGB")
        images.append(image)
        image_idxs.append(info["image_info"][image_path]["matched_text_index"])
    if len(images) == 0:
        raise ValueError("No images in sample")
    # filter out images that are exact duplicates
    images_tensors = preprocess_image(images, clip_processor)
    # keep at most MAX_NUM_IMAGES images (in original order)
    keep_ixs = range(min(len(images_tensors), MAX_NUM_IMAGES))
    images_tensors = images_tensors[keep_ixs]
    image_idxs = [image_idxs[ix] for ix in keep_ixs]
    # pad to 5 images with all-zero frames so every sample has a fixed shape
    if len(images_tensors) < MAX_NUM_IMAGES:
        zero_padding = torch.zeros(
            (MAX_NUM_IMAGES - len(images_tensors), 3, 224, 224), dtype=torch.float
        )
        images_tensors = torch.cat((images_tensors, zero_padding), dim=0)
    # add in <image> and <eoc> tokens
    # eoc after sentence = "sentence loss"
    for ix in image_idxs:
        sentences[ix] = f"<|endofchunk|><image>{sentences[ix]}"
    text = " ".join(sentences)
    text = text.replace("<|endofchunk|>", "", 1)  # but remove first eoc
    # whitespace cleanup around the special markers
    text = (
        text.replace(" <|endofchunk|>", "<|endofchunk|>")
        .replace("<image> ", "<image>")
        .replace(" <image>", "<image>")
    )
    text = f"{text}<|endofchunk|>{tokenizer.eos_token}"
    tokenizer.padding_side = "right"
    text_tensor = tokenizer(
        text, max_length=256, truncation=True, padding="max_length", return_tensors="pt"
    )
    # reject sequences with too few images (after truncation)
    num_images = torch.count_nonzero(
        text_tensor["input_ids"]
        == tokenizer.additional_special_tokens_ids[
            tokenizer.additional_special_tokens.index("<image>")
        ]
    )
    if num_images == 0:
        raise ValueError("No images in sample")
    elif (
        num_images == 1 and random.random() <= 0.5
    ):  # 50% chance of keeping single image samples
        raise ValueError("Only one image in sample")
    return (
        images_tensors,
        (text_tensor["input_ids"], text_tensor["attention_mask"]),
    )
def get_coco_dataset(args, image_processor, tokenizer, epoch=0):
coco_data_dir = "path/to/coco/train2014"
coco_ann = "path/to/coco/annotations/captions_train2014.json"
preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer) | coco_dataset = CaptionDataset(coco_data_dir, coco_ann, preprocess_text_fn, image_processor) | 1 | 2023-11-02 01:36:23+00:00 | 12k |
microsoft/monitors4codegen | src/monitors4codegen/multilspy/lsp_protocol_handler/server.py | [
{
"identifier": "LspNotification",
"path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_requests.py",
"snippet": "class LspNotification:\n def __init__(self, send_notification):\n self.send_notification = send_notification\n\n def did_change_workspace_folders(\n self, par... | import asyncio
import dataclasses
import json
import os
from typing import Any, Dict, List, Optional, Union
from .lsp_requests import LspNotification, LspRequest
from .lsp_types import ErrorCodes | 8,392 | super().__init__(message)
self.code = code
    def to_lsp(self) -> StringDict:
        # Serialize to the JSON-RPC error-object shape: {"code": int, "message": str}.
        return {"code": self.code, "message": super().__str__()}
    @classmethod
    def from_lsp(cls, d: StringDict) -> "Error":
        # Inverse of to_lsp: rebuild an Error from a decoded error object.
        return Error(d["code"], d["message"])
    def __str__(self) -> str:
        # Append the numeric code to the base message for readable logs.
        return f"{super().__str__()} ({self.code})"
def make_response(request_id: Any, params: PayloadLike) -> StringDict:
    """Build a JSON-RPC 2.0 success response for *request_id*."""
    payload = {"jsonrpc": "2.0", "id": request_id}
    payload["result"] = params
    return payload
def make_error_response(request_id: Any, err: Error) -> StringDict:
    """Build a JSON-RPC 2.0 error response for *request_id*."""
    payload = {"jsonrpc": "2.0", "id": request_id}
    payload["error"] = err.to_lsp()
    return payload
def make_notification(method: str, params: PayloadLike) -> StringDict:
    """Build a JSON-RPC 2.0 notification (no id, so no response is expected)."""
    return dict(jsonrpc="2.0", method=method, params=params)
def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict:
    """Build a JSON-RPC 2.0 request envelope carrying *request_id*."""
    return dict(jsonrpc="2.0", method=method, id=request_id, params=params)
class StopLoopException(Exception):
    """Raised internally to terminate the message-processing loop."""
def create_message(payload: PayloadLike):
    """Encode *payload* in LSP wire format: a Content-Length header chunk,
    a Content-Type header chunk, and the compact JSON body."""
    body = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":")).encode(ENCODING)
    length_header = f"Content-Length: {len(body)}\r\n".encode(ENCODING)
    type_header = "Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING)
    return (length_header, type_header, body)
class MessageType:
    # Severity levels of window/showMessage & logMessage in the LSP spec.
    error = 1
    warning = 2
    info = 3
    log = 4
class Request:
    """Tracks one outstanding client-to-server request: the reader task
    stores the response (or error) and wakes waiters via the condition."""
    def __init__(self) -> None:
        self.cv = asyncio.Condition()
        self.result: Optional[PayloadLike] = None
        self.error: Optional[Error] = None
    async def on_result(self, params: PayloadLike) -> None:
        # Called by the message reader when a successful response arrives.
        self.result = params
        async with self.cv:
            self.cv.notify()
    async def on_error(self, err: Error) -> None:
        # Called by the message reader when an error response arrives.
        self.error = err
        async with self.cv:
            self.cv.notify()
def content_length(line: bytes) -> Optional[int]:
    """Parse the value of an LSP Content-Length header line.

    Returns the byte count, or None when *line* is a different header.
    Raises ValueError when the header value is not an integer.
    """
    prefix = b"Content-Length: "
    if not line.startswith(prefix):
        return None
    value = line[len(prefix):].strip()
    try:
        return int(value)
    except ValueError:
        raise ValueError("Invalid Content-Length header: {}".format(value))
class LanguageServerHandler:
"""
This class provides the implementation of Python client for the Language Server Protocol.
A class that launches the language server and communicates with it
using the Language Server Protocol (LSP).
It provides methods for sending requests, responses, and notifications to the server
and for registering handlers for requests and notifications from the server.
Uses JSON-RPC 2.0 for communication with the server over stdin/stdout.
Attributes:
send: A LspRequest object that can be used to send requests to the server and
await for the responses.
notify: A LspNotification object that can be used to send notifications to the server.
cmd: A string that represents the command to launch the language server process.
process: A subprocess.Popen object that represents the language server process.
_received_shutdown: A boolean flag that indicates whether the client has received
a shutdown request from the server.
request_id: An integer that represents the next available request id for the client.
_response_handlers: A dictionary that maps request ids to Request objects that
store the results or errors of the requests.
on_request_handlers: A dictionary that maps method names to callback functions
that handle requests from the server.
on_notification_handlers: A dictionary that maps method names to callback functions
that handle notifications from the server.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
tasks: A dictionary that maps task ids to asyncio.Task objects that represent
the asynchronous tasks created by the handler.
task_counter: An integer that represents the next available task id for the handler.
loop: An asyncio.AbstractEventLoop object that represents the event loop used by the handler.
"""
def __init__(self, process_launch_info: ProcessLaunchInfo, logger=None) -> None:
"""
Params:
cmd: A string that represents the command to launch the language server process.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
"""
self.send = LspRequest(self.send_request)
| """
This file provides the implementation of the JSON-RPC client, that launches and
communicates with the language server.
The initial implementation of this file was obtained from
https://github.com/predragnikolic/OLSP under the MIT License with the following terms:
MIT License
Copyright (c) 2023 Предраг Николић
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
StringDict = Dict[str, Any]
PayloadLike = Union[List[StringDict], StringDict, None]
CONTENT_LENGTH = "Content-Length: "
ENCODING = "utf-8"
@dataclasses.dataclass
class ProcessLaunchInfo:
"""
This class is used to store the information required to launch a process.
"""
# The command to launch the process
cmd: str
# The environment variables to set for the process
env: Dict[str, str] = dataclasses.field(default_factory=dict)
# The working directory for the process
cwd: str = os.getcwd()
class Error(Exception):
def __init__(self, code: ErrorCodes, message: str) -> None:
super().__init__(message)
self.code = code
def to_lsp(self) -> StringDict:
return {"code": self.code, "message": super().__str__()}
@classmethod
def from_lsp(cls, d: StringDict) -> "Error":
return Error(d["code"], d["message"])
def __str__(self) -> str:
return f"{super().__str__()} ({self.code})"
def make_response(request_id: Any, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "id": request_id, "result": params}
def make_error_response(request_id: Any, err: Error) -> StringDict:
return {"jsonrpc": "2.0", "id": request_id, "error": err.to_lsp()}
def make_notification(method: str, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "method": method, "params": params}
def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict:
return {"jsonrpc": "2.0", "method": method, "id": request_id, "params": params}
class StopLoopException(Exception):
pass
def create_message(payload: PayloadLike):
body = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":")).encode(ENCODING)
return (
f"Content-Length: {len(body)}\r\n".encode(ENCODING),
"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING),
body,
)
class MessageType:
error = 1
warning = 2
info = 3
log = 4
class Request:
def __init__(self) -> None:
self.cv = asyncio.Condition()
self.result: Optional[PayloadLike] = None
self.error: Optional[Error] = None
async def on_result(self, params: PayloadLike) -> None:
self.result = params
async with self.cv:
self.cv.notify()
async def on_error(self, err: Error) -> None:
self.error = err
async with self.cv:
self.cv.notify()
def content_length(line: bytes) -> Optional[int]:
if line.startswith(b"Content-Length: "):
_, value = line.split(b"Content-Length: ")
value = value.strip()
try:
return int(value)
except ValueError:
raise ValueError("Invalid Content-Length header: {}".format(value))
return None
class LanguageServerHandler:
"""
This class provides the implementation of Python client for the Language Server Protocol.
A class that launches the language server and communicates with it
using the Language Server Protocol (LSP).
It provides methods for sending requests, responses, and notifications to the server
and for registering handlers for requests and notifications from the server.
Uses JSON-RPC 2.0 for communication with the server over stdin/stdout.
Attributes:
send: A LspRequest object that can be used to send requests to the server and
await for the responses.
notify: A LspNotification object that can be used to send notifications to the server.
cmd: A string that represents the command to launch the language server process.
process: A subprocess.Popen object that represents the language server process.
_received_shutdown: A boolean flag that indicates whether the client has received
a shutdown request from the server.
request_id: An integer that represents the next available request id for the client.
_response_handlers: A dictionary that maps request ids to Request objects that
store the results or errors of the requests.
on_request_handlers: A dictionary that maps method names to callback functions
that handle requests from the server.
on_notification_handlers: A dictionary that maps method names to callback functions
that handle notifications from the server.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
tasks: A dictionary that maps task ids to asyncio.Task objects that represent
the asynchronous tasks created by the handler.
task_counter: An integer that represents the next available task id for the handler.
loop: An asyncio.AbstractEventLoop object that represents the event loop used by the handler.
"""
def __init__(self, process_launch_info: ProcessLaunchInfo, logger=None) -> None:
"""
Params:
cmd: A string that represents the command to launch the language server process.
logger: An optional function that takes two strings (source and destination) and
a payload dictionary, and logs the communication between the client and the server.
"""
self.send = LspRequest(self.send_request) | self.notify = LspNotification(self.send_notification) | 0 | 2023-11-04 21:49:04+00:00 | 12k |
bigai-nlco/langsuite | langsuite/envs/cwah/cwah_world.py | [
{
"identifier": "CSS4_COLORS",
"path": "langsuite/constants.py",
"snippet": "CSS4_COLORS = {\n \"aliceblue\": \"#F0F8FF\",\n \"antiquewhite\": \"#FAEBD7\",\n \"aqua\": \"#00FFFF\",\n \"aquamarine\": \"#7FFFD4\",\n \"azure\": \"#F0FFFF\",\n \"beige\": \"#F5F5DC\",\n \"bisque\": \"#FF... | import copy
import math
import random
import numpy as np
import plotly.graph_objects as go
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from langsuite.constants import CSS4_COLORS
from langsuite.shapes import Geometry, Point2D, Polygon2D
from langsuite.utils.logging import logger
from langsuite.world import (
WORLD_REGISTRY,
Door,
Object2D,
ObjectType,
Room,
Wall,
Window,
World,
) | 7,253 | alias: Optional[str] = None,
geometry: Optional[Polygon2D] = None,
class_name: Optional[str] = None,
room2room: Tuple[str] = ...,
openable: bool = True,
is_open: bool = True,
walls: Tuple[str] = ...,
**kwargs,
):
super().__init__(
door_id,
alias=alias,
geometry=geometry,
class_name=class_name,
room2room=room2room,
openable=openable,
is_open=is_open,
**kwargs,
)
self.walls = walls
self.class_name = class_name
@classmethod
def create(cls, door):
is_open = door.get("openness", 1) == 1
openable = door.get("openable", False)
polys_2d = Polygon2D(door["polygon"])
room2room = [door["room0"], door["room1"]]
class_name = door["class_name"]
# "wall|3|10.14|3.38|15.21|3.38"
return cls(
door["id"],
room2room=room2room,
is_open=is_open,
openable=openable,
class_name=class_name,
geometry=polys_2d,
)
def flip(self) -> None:
"""Flip doors wrt. wall attribute"""
if len(self.walls) > 1 and "exterior" not in self.walls[1]:
# Do not flip if the door is connected to outside.
wall0, wall1 = self.walls
self.walls = [wall1, wall0]
self.room2room = [self.room2room[1], self.room2room[0]]
def set_open(self, open=True):
self.is_open = open
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.xy
axes.plot(x, y, color="green", linewidth=3)
def render(self, fig=None):
if self.geometry is None:
return
if not fig:
fig = go.Figure()
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_shape(
type="rect",
xref="x",
yref="y",
x0=self.geometry.x_min,
y0=self.geometry.y_min,
x1=self.geometry.x_max,
y1=self.geometry.y_max,
opacity=0.2,
fillcolor="lightgreen",
line=dict(width=0),
)
class CwahWindow(Window):
def __init__(
self,
window_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Geometry] = None,
class_name: Optional[str] = None,
room2room: Tuple[str] = ...,
walls: Tuple[str] = ...,
**kwargs,
):
super().__init__(
window_id,
alias=alias,
geometry=geometry,
class_name=class_name,
room2room=room2room,
**kwargs,
)
self.walls = walls
self.class_name = class_name
@classmethod
def create(cls, window):
room2room = [window["room0"], window["room1"]]
polys_2d = Polygon2D(window["polygon"])
return cls(
window["id"],
geometry=polys_2d,
room2room=room2room,
class_name=window["class_name"],
)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.xy
axes.plot(x, y, color="blue", linewidth=5)
| # Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
CwahPath = Path(__file__).parent
def ToEulerAngles(q):
sinp = 2 * (q[3] * q[1] - q[0] * q[2])
sinp = int(sinp)
pitch = math.asin(sinp)
return pitch
def get_bbox(center, size):
minx = center[0] - (1 / 2) * size[0]
maxx = center[0] + (1 / 2) * size[0]
minz = center[2] - (1 / 2) * size[2]
maxz = center[2] + (1 / 2) * size[2]
return [[minx, minz], [minx, maxz], [maxx, maxz], [maxx, minz]]
class CwahWall(Wall):
def __init__(
self,
wall_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Geometry] = None,
class_name: Optional[str] = None,
room2room: Union[Tuple[str], str] = list(),
empty: bool,
**kwargs,
):
super().__init__(
wall_id,
alias=alias,
geometry=geometry,
class_name=class_name,
asset_id="not_exist",
room2room=room2room,
**kwargs,
)
self.empty = empty
self.class_name = class_name
@classmethod
def create(cls, wall_data):
polys_2d = Polygon2D(wall_data["polygon"])
empty = wall_data.get("empty", False)
return cls(
wall_data["id"],
geometry=polys_2d,
class_name=wall_data["class_name"],
props=wall_data,
empty=empty,
)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
if self.empty:
axes.plot(x, y, color="black", linestyle="-.", linewidth=0.5)
else:
axes.plot(x, y, color="black", linewidth=0.5)
axes.fill(x, y, color="gray")
def render(self, fig=None):
if self.geometry is None:
return
if not fig:
fig = go.Figure()
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_shape(
type="rect",
xref="x",
yref="y",
x0=self.geometry.x_min,
y0=self.geometry.y_min,
x1=self.geometry.x_max,
y1=self.geometry.y_max,
opacity=0.2,
fillcolor="black",
line=dict(width=0),
)
class CwahDoor(Door):
def __init__(
self,
door_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Polygon2D] = None,
class_name: Optional[str] = None,
room2room: Tuple[str] = ...,
openable: bool = True,
is_open: bool = True,
walls: Tuple[str] = ...,
**kwargs,
):
super().__init__(
door_id,
alias=alias,
geometry=geometry,
class_name=class_name,
room2room=room2room,
openable=openable,
is_open=is_open,
**kwargs,
)
self.walls = walls
self.class_name = class_name
@classmethod
def create(cls, door):
is_open = door.get("openness", 1) == 1
openable = door.get("openable", False)
polys_2d = Polygon2D(door["polygon"])
room2room = [door["room0"], door["room1"]]
class_name = door["class_name"]
# "wall|3|10.14|3.38|15.21|3.38"
return cls(
door["id"],
room2room=room2room,
is_open=is_open,
openable=openable,
class_name=class_name,
geometry=polys_2d,
)
def flip(self) -> None:
"""Flip doors wrt. wall attribute"""
if len(self.walls) > 1 and "exterior" not in self.walls[1]:
# Do not flip if the door is connected to outside.
wall0, wall1 = self.walls
self.walls = [wall1, wall0]
self.room2room = [self.room2room[1], self.room2room[0]]
def set_open(self, open=True):
self.is_open = open
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.xy
axes.plot(x, y, color="green", linewidth=3)
def render(self, fig=None):
if self.geometry is None:
return
if not fig:
fig = go.Figure()
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_shape(
type="rect",
xref="x",
yref="y",
x0=self.geometry.x_min,
y0=self.geometry.y_min,
x1=self.geometry.x_max,
y1=self.geometry.y_max,
opacity=0.2,
fillcolor="lightgreen",
line=dict(width=0),
)
class CwahWindow(Window):
def __init__(
self,
window_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Geometry] = None,
class_name: Optional[str] = None,
room2room: Tuple[str] = ...,
walls: Tuple[str] = ...,
**kwargs,
):
super().__init__(
window_id,
alias=alias,
geometry=geometry,
class_name=class_name,
room2room=room2room,
**kwargs,
)
self.walls = walls
self.class_name = class_name
@classmethod
def create(cls, window):
room2room = [window["room0"], window["room1"]]
polys_2d = Polygon2D(window["polygon"])
return cls(
window["id"],
geometry=polys_2d,
room2room=room2room,
class_name=window["class_name"],
)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.xy
axes.plot(x, y, color="blue", linewidth=5)
| class CwahRoom(Room): | 9 | 2023-11-01 01:47:00+00:00 | 12k |
radekd91/inferno | inferno/datasets/LRS3Pseudo3DDM.py | [
{
"identifier": "LRS3DataModule",
"path": "inferno/datasets/LRS3DataModule.py",
"snippet": "class LRS3DataModule(FaceVideoDataModule):\nclass LRS3Dataset(VideoDatasetBase):\n def __init__(self, root_dir, output_dir, \n processed_subfolder=None, \n face_detector='mediapip... | from inferno.datasets.LRS3DataModule import LRS3DataModule, LRS3Dataset, robust_collate
from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp
from inferno.datasets.ConditionedVideoTestDatasetWrapper import ConditionedVideoTestDatasetWrapper
from pathlib import Path
import imgaug
import numpy as np
import torch
import omegaconf
import time | 7,705 | # landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
temporal_split_end= sum(self.temporal_split) if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
# inflate_by_video_size=self.inflate_by_video_size,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_cond = ConditionedVideoTestDatasetWrapper(
self.test_set_cond_,
self.test_condition_source,
self.test_condition_settings,
key_prefix="gt_",
)
max_training_test_samples = 2
self.test_set_train_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas,
sorted(train)[:max_training_test_samples],
self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_test,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start= 0 if self.temporal_split is not None else None,
temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
# inflate_by_video_size=self.inflate_by_video_size,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_train_cond = ConditionedVideoTestDatasetWrapper(
self.test_set_train_cond_,
self.test_condition_source,
self.test_condition_settings,
key_prefix="gt_",
)
max_validation_test_samples = 2
self.test_set_val_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas,
sorted(val)[:max_validation_test_samples],
self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_val,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None,
temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_val_cond = ConditionedVideoTestDatasetWrapper(
self.test_set_val_cond_,
self.test_condition_source,
self.test_condition_settings,
key_prefix="gt_",
)
def test_dataloader(self):
test_dls = []
test_dl = super().test_dataloader()
if test_dl is not None:
if not isinstance(test_dl, list):
test_dl = [test_dl]
test_dls += test_dl
self.test_set_names += ["test"]
test_dls += [torch.utils.data.DataLoader(self.test_set_train, shuffle=False,
# num_workers=self.num_workers,
num_workers=0,
pin_memory=True,
batch_size=self.batch_size_test,
drop_last=False,
# drop_last=self.drop_last,
|
class LRS3Pseudo3DDM(LRS3DataModule):
def __init__(self, root_dir, output_dir,
processed_subfolder=None, face_detector='mediapipe',
# landmarks_from='sr_res',
landmarks_from=None,
face_detector_threshold=0.9,
image_size=224, scale=1.25,
batch_size_train=16,
batch_size_val=16,
batch_size_test=16,
sequence_length_train=16,
sequence_length_val=16,
sequence_length_test=16,
# occlusion_length_train=0,
# occlusion_length_val=0,
# occlusion_length_test=0,
occlusion_settings_train=None,
occlusion_settings_val=None,
occlusion_settings_test=None,
split = "original",
num_workers=4,
device=None,
augmentation=None,
drop_last=True,
include_processed_audio = True,
include_raw_audio = True,
test_condition_source=None,
test_condition_settings=None,
inflate_by_video_size=False,
preload_videos=False,
read_video=True,
read_audio=True,
reconstruction_type=None,
return_global_pose= False,
return_appearance= False,
average_shape_decode= True,
emotion_type=None,
return_emotion_feature=False,
):
super().__init__(root_dir, output_dir, processed_subfolder, face_detector,
landmarks_from,
face_detector_threshold,
image_size, scale, batch_size_train, batch_size_val, batch_size_test,
sequence_length_train, sequence_length_val, sequence_length_test,
occlusion_settings_train, occlusion_settings_val, occlusion_settings_test,
split,
num_workers, device, augmentation, drop_last,
include_processed_audio=include_processed_audio,
include_raw_audio=include_raw_audio,
inflate_by_video_size=inflate_by_video_size,
preload_videos=preload_videos
)
self.test_condition_source = test_condition_source or "original"
self.test_condition_settings = test_condition_settings
self.read_video = read_video
self.read_audio = read_audio
self.reconstruction_type = reconstruction_type
if self.reconstruction_type is not None:
if isinstance(self.reconstruction_type, str):
self.reconstruction_type = [self.reconstruction_type]
elif isinstance(self.reconstruction_type, omegaconf.listconfig.ListConfig):
self.reconstruction_type = list(self.reconstruction_type)
assert isinstance(self.reconstruction_type, list), "reconstruction_type must be a list or None"
self.return_global_pose = return_global_pose
self.return_appearance = return_appearance
self.average_shape_decode = average_shape_decode
self.emotion_type = emotion_type
self.return_emotion_feature = return_emotion_feature
def setup(self, stage=None):
train, val, test = self._get_subsets(self.split)
# training_augmenter = create_image_augmenter(self.image_size, self.augmentation)
training_augmenter = None
self.training_set = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, train,
self.audio_metas, self.sequence_length_train, image_size=self.image_size,
transforms=training_augmenter,
**self.occlusion_settings_train,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start= 0 if self.temporal_split is not None else None,
temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None,
preload_videos=self.preload_videos,
inflate_by_video_size=self.inflate_by_video_size,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.validation_set = LRS3Pseudo3dDataset(self.root_dir, self.output_dir,
self.video_list, self.video_metas, val, self.audio_metas,
self.sequence_length_val, image_size=self.image_size,
**self.occlusion_settings_val,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None,
temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
preload_videos=self.preload_videos,
inflate_by_video_size=self.inflate_by_video_size,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_names = []
if len(test) > 0:
self.test_set_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, test, self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_test,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
temporal_split_end= sum(self.temporal_split) if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
# inflate_by_video_size=self.inflate_by_video_size,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set = ConditionedVideoTestDatasetWrapper(
self.test_set_,
None,
None,
key_prefix="gt_",
)
max_training_test_samples = 2
self.test_set_train_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(train)[:max_training_test_samples], self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_test,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start= 0 if self.temporal_split is not None else None,
temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
# inflate_by_video_size=self.inflate_by_video_size,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_train = ConditionedVideoTestDatasetWrapper(
self.test_set_train_,
None,
None,
key_prefix="gt_",
)
max_validation_test_samples = 2
self.test_set_val_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(val)[:max_validation_test_samples], self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_test,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None,
temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_val = ConditionedVideoTestDatasetWrapper(
self.test_set_val_,
None,
None,
key_prefix="gt_",
)
# conditioned test set
if self.test_condition_source != "original":
if len(test) > 0:
self.test_set_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas,
test,
self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_test,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
temporal_split_end= sum(self.temporal_split) if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
# inflate_by_video_size=self.inflate_by_video_size,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_cond = ConditionedVideoTestDatasetWrapper(
self.test_set_cond_,
self.test_condition_source,
self.test_condition_settings,
key_prefix="gt_",
)
max_training_test_samples = 2
self.test_set_train_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas,
sorted(train)[:max_training_test_samples],
self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_test,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start= 0 if self.temporal_split is not None else None,
temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
# inflate_by_video_size=self.inflate_by_video_size,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_train_cond = ConditionedVideoTestDatasetWrapper(
self.test_set_train_cond_,
self.test_condition_source,
self.test_condition_settings,
key_prefix="gt_",
)
max_validation_test_samples = 2
self.test_set_val_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas,
sorted(val)[:max_validation_test_samples],
self.audio_metas,
# sequence_length=self.sequence_length_test,
sequence_length="all",
image_size=self.image_size,
**self.occlusion_settings_val,
hack_length=False,
# use_original_video=self.use_original_video,
include_processed_audio = self.include_processed_audio,
include_raw_audio = self.include_raw_audio,
# landmark_types=self.landmark_types,
# landmark_source=self.landmark_sources,
# segmentation_source=self.segmentation_source,
temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None,
temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None,
# preload_videos=self.preload_videos,
inflate_by_video_size=False,
include_filename=True,
read_video=self.read_video,
read_audio=self.read_audio,
reconstruction_type=self.reconstruction_type,
return_global_pose=self.return_global_pose,
return_appearance=self.return_appearance,
average_shape_decode=self.average_shape_decode,
emotion_type=self.emotion_type,
return_emotion_feature=self.return_emotion_feature,
)
self.test_set_val_cond = ConditionedVideoTestDatasetWrapper(
self.test_set_val_cond_,
self.test_condition_source,
self.test_condition_settings,
key_prefix="gt_",
)
def test_dataloader(self):
test_dls = []
test_dl = super().test_dataloader()
if test_dl is not None:
if not isinstance(test_dl, list):
test_dl = [test_dl]
test_dls += test_dl
self.test_set_names += ["test"]
test_dls += [torch.utils.data.DataLoader(self.test_set_train, shuffle=False,
# num_workers=self.num_workers,
num_workers=0,
pin_memory=True,
batch_size=self.batch_size_test,
drop_last=False,
# drop_last=self.drop_last, | collate_fn=robust_collate | 0 | 2023-11-07 20:13:32+00:00 | 12k |
hxz393/ConfigCenterComparer | ui/table_main.py | [
{
"identifier": "COL_INFO",
"path": "config/settings.py",
"snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\... | import logging
from typing import List, Optional, Dict
from PyQt5.QtCore import Qt, QPoint, pyqtSignal
from PyQt5.QtGui import QBrush, QColor, QKeyEvent
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QMenu, QAction, QHeaderView
from config.settings import COL_INFO, COLOR_SKIP, COLOR_CONSISTENCY_FULLY, COLOR_CONSISTENCY_PARTIALLY, COLOR_EMPTY, COLOR_DEFAULT
from lib.log_time import log_time
from ui.action_copy import ActionCopy
from ui.action_save import ActionSave
from ui.action_skip import ActionSkip
from ui.action_unskip import ActionUnskip
from ui.config_manager import ConfigManager
from ui.lang_manager import LangManager | 9,203 | action.setData(index)
action.triggered.connect(self._toggle_column_visibility)
# 在鼠标右键点击位置显示菜单
menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos))
def _toggle_column_visibility(self) -> None:
"""
根据用户选择,切换列的可见性。
此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。
:rtype: None
:return: 无返回值。
"""
action = self.sender()
if isinstance(action, QAction):
column_index = action.data()
if action.isChecked():
self.showColumn(column_index)
else:
self.hideColumn(column_index)
def add_row(self, data: List[List[str]]) -> None:
"""
向表格中添加一行数据。
:param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。
:type data: List[List[str]]
:rtype: None
:return: 无返回值。
"""
row_position = 0
try:
# 获取最后行数
row_position = self.rowCount()
# 插入最后一行
self.insertRow(row_position)
# 插入单元格数据
self._fill_row_data(row_position, data)
except Exception:
logger.exception(f"Error occurred while adding a new row at position {row_position}")
self.removeRow(row_position)
def _fill_row_data(self,
row_position: int,
data: List[List[str]]) -> None:
"""
填充指定行的数据。
:param row_position: 行位置
:param data: 行数据
:type row_position: int
:type data: List[List[str]]
:rtype: None
:return: 无返回值。
"""
for column, (display_text, user_data) in enumerate(data):
# 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole)
item = QTableWidgetItem(str(display_text))
# 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole)
item.setData(Qt.UserRole, user_data)
# 设置单元格不可编辑状态
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
# 正常表格插入方法
self.setItem(row_position, column, item)
@log_time
def apply_color_to_table(self, rows: List[int] = None) -> None:
"""
对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。
:param rows: 可选,要应用颜色的行号列表。
:type rows: List[int], optional
:rtype: None
:return: 无返回值。
"""
color_switch = self.config_manager.get_config_main().get('color_set', 'ON')
if color_switch == 'OFF':
return
if rows is None or not isinstance(rows, list):
rows = range(self.rowCount())
try:
for row in rows:
# 不给隐藏行设置颜色
if self.isRowHidden(row):
continue
self._process_row_for_color(row)
except Exception:
logger.exception("Exception in apply_color_to_table method")
self.status_updated.emit(self.lang['label_status_error'])
def _process_row_for_color(self, row: int) -> None:
"""
根据一致性、跳过状态和是否为空值给单行应用颜色。
:param row: 行号,对每行进行颜色处理。
:type row: int
:rtype: None
:return: 无返回值。
"""
consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)
skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)
# 忽略状态为是时设置颜色
if skip_data == 'yes':
self.apply_color(row, COLOR_SKIP)
return
# 根据一致性值设置颜色
if consistency_data == 'fully':
self.apply_color(row, COLOR_CONSISTENCY_FULLY)
elif consistency_data == 'partially':
self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY)
else:
| """
此文件定义了 TableMain 类,一个基于 PyQt5 的 QTableWidget 的高级实现。
TableMain 类主要用于显示和管理表格数据,提供了多种扩展功能,包括语言国际化支持、动态配置管理、右键菜单操作等。
该类与多个辅助类(如 LangManager 和 ConfigManager)集成,实现了复杂的功能逻辑。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
class TableMain(QTableWidget):
"""
主表格类,用于展示和管理数据行。
此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。
通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。
:param lang_manager: 用于管理界面语言的 LangManager 实例。
:type lang_manager: LangManager
:param config_manager: 用于管理配置的 ConfigManager 实例。
:type config_manager: ConfigManager
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
status_updated = pyqtSignal(str)
filter_updated = pyqtSignal(list)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__()
self.lang_manager = lang_manager
self.lang_manager.lang_updated.connect(self.update_lang)
self.config_manager = config_manager
# 实例化用到的组件
self.actionCopy = ActionCopy(self.lang_manager, self)
self.actionSave = ActionSave(self.lang_manager, self)
self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)
self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self)
# 手动连接实例化的组件信号到转发函数
self.actionCopy.status_updated.connect(self.forward_status)
self.actionSave.status_updated.connect(self.forward_status)
self.actionSkip.status_updated.connect(self.forward_status)
self.actionSkip.filter_updated.connect(self.forward_filter)
self.actionUnskip.status_updated.connect(self.forward_status)
self.actionUnskip.filter_updated.connect(self.forward_filter)
self.initUI()
def initUI(self) -> None:
"""
初始化用户界面。
此方法负责设置表格的基本属性,如列数、表头标签、选择行为等。还包括对特定列的隐藏和宽度调整策略的设置。
:rtype: None
:return: 无返回值。
"""
# 先运行语言更新,里面有表头定义
self.update_lang()
self.hidden_cols = ["pro_time", "pre_time", "test_time", "dev_time"]
self.resize_cols = ["name", "group", "consistency", "skip"]
# 配置表格基本属性
self.setColumnCount(len(self.column_headers))
self.setHorizontalHeaderLabels(self.column_headers)
self.setEditTriggers(QTableWidget.NoEditTriggers)
self.setSelectionBehavior(QTableWidget.SelectItems)
# 隐藏垂直表头
self.verticalHeader().setVisible(False)
# 启用自动换行,没生效
self.setWordWrap(True)
self.setTextElideMode(Qt.ElideNone)
# 为表头视图设置上下文菜单事件
self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)
self.horizontalHeader().customContextMenuRequested.connect(self._header_context_menu)
# 为表单设置上下文菜单事件
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._cell_context_menu)
# 隐藏指定列
[self.hideColumn(COL_INFO[i]['col']) for i in self.hidden_cols]
# 设置表宽度策略
self.set_header_resize()
def set_header_resize(self):
"""
设置表头的列宽度和调整策略。
此方法负责定义表头列的宽度调整策略和其他相关属性。它设置了表头列的默认宽度、是否可拖动以及列的自动调整策略。
例如,某些列被设置为根据内容自动调整宽度,而其他列则被设置为可伸缩以适应表格的大小。
:rtype: None
:return: 无返回值。
"""
# 设置默认列宽度,列宽调整策略,列可拖动
self.horizontalHeader().setSectionsMovable(True)
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.horizontalHeader().setMinimumSectionSize(100)
# 设置要自动调整宽度的列
[self.horizontalHeader().setSectionResizeMode(COL_INFO[i]['col'], QHeaderView.ResizeToContents) for i in self.resize_cols]
def update_lang(self) -> None:
"""
更新界面语言设置。
:rtype: None
:return: 无返回值。
"""
self.lang = self.lang_manager.get_lang()
self.column_headers = [
self.lang['ui.table_main_1'],
self.lang['ui.table_main_2'],
self.lang['ui.table_main_3'],
self.lang['ui.dialog_settings_connection_2'],
f"{self.lang['ui.dialog_settings_connection_2']} {self.lang['ui.table_main_4']}",
self.lang['ui.dialog_settings_connection_3'],
f"{self.lang['ui.dialog_settings_connection_3']} {self.lang['ui.table_main_4']}",
self.lang['ui.dialog_settings_connection_4'],
f"{self.lang['ui.dialog_settings_connection_4']} {self.lang['ui.table_main_4']}",
self.lang['ui.dialog_settings_connection_5'],
f"{self.lang['ui.dialog_settings_connection_5']} {self.lang['ui.table_main_4']}",
self.lang['ui.table_main_5'],
self.lang['ui.table_main_6'],
]
# 重新应用到表头
self.setHorizontalHeaderLabels(self.column_headers)
# 定义数据和显示映射的字典
consistency_status_mapping = {
"inconsistent": self.lang['ui.action_start_8'],
"fully": self.lang['ui.action_start_9'],
"partially": self.lang['ui.action_start_10'],
"unknown": self.lang['ui.action_start_13'],
}
skip_status_mapping = {
"no": self.lang['ui.action_start_11'],
"yes": self.lang['ui.action_start_12'],
"unknown": self.lang['ui.action_start_13'],
}
for row in range(self.rowCount()):
# 更新忽略状态文字
self._update_item_text(row, "skip", skip_status_mapping)
# 更新一致性状态文字
self._update_item_text(row, "consistency", consistency_status_mapping)
def _update_item_text(self,
row: int,
user_data_key: str,
text_mapping: Dict[str, str]) -> None:
"""
根据提供的文本映射更新指定行的项文本。
此方法用于更新表格或列表中特定行的文本。它根据用户数据键(user_data_key)获取对应行的项,然后根据提供的文本映射(text_mapping)更新该项的文本。
:param row: 要更新的行索引。
:type row: int
:param user_data_key: 用于获取项的用户数据键。
:type user_data_key: str
:param text_mapping: 用户数据到文本的映射字典。
:type text_mapping: Dict[str, str]
:return: 无返回值。
:rtype: None
"""
item = self.item(row, COL_INFO[user_data_key]['col'])
if item is not None:
user_data = item.data(Qt.UserRole)
if user_data in text_mapping:
item.setText(text_mapping[user_data])
def keyPressEvent(self, event: QKeyEvent) -> None:
"""
处理键盘事件。
此方法用于处理键盘事件,特别是复制功能的快捷键。如果按下 Ctrl+C,则复制选中的单元格内容。
:param event: 键盘事件对象。
:type event: QKeyEvent
:rtype: None
:return: 无返回值。
"""
if event.key() == Qt.Key_C and (event.modifiers() & Qt.ControlModifier):
self.actionCopy.action_copy()
else:
super().keyPressEvent(event)
def _cell_context_menu(self, pos: QPoint) -> None:
"""
实现表格单元格的右键菜单功能。
:param pos: 右键点击的位置。
:type pos: QPoint
:rtype: None
:return: 无返回值。
"""
menu = QMenu(self)
menu.addAction(self.actionCopy.action_copy)
separator = QAction(menu)
separator.setSeparator(True)
menu.addAction(separator)
menu.addAction(self.actionSkip.action_skip)
menu.addAction(self.actionUnskip.action_unskip)
sep = QAction(menu)
sep.setSeparator(True)
menu.addAction(sep)
menu.addAction(self.actionSave.action_save)
menu.exec_(self.viewport().mapToGlobal(pos))
def _header_context_menu(self, pos: QPoint) -> None:
"""
实现表头的右键菜单功能。
:param pos: 右键点击的位置。
:type pos: QPoint
:rtype: None
:return: 无返回值。
"""
menu = QMenu(self)
# 动态创建一个菜单项,用于隐藏/显示列
for index in range(self.columnCount()):
column_name = self.horizontalHeaderItem(index).text()
action = menu.addAction(f"{column_name}")
action.setCheckable(True)
action.setChecked(not self.isColumnHidden(index))
action.setData(index)
action.triggered.connect(self._toggle_column_visibility)
# 在鼠标右键点击位置显示菜单
menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos))
def _toggle_column_visibility(self) -> None:
"""
根据用户选择,切换列的可见性。
此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。
:rtype: None
:return: 无返回值。
"""
action = self.sender()
if isinstance(action, QAction):
column_index = action.data()
if action.isChecked():
self.showColumn(column_index)
else:
self.hideColumn(column_index)
def add_row(self, data: List[List[str]]) -> None:
"""
向表格中添加一行数据。
:param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。
:type data: List[List[str]]
:rtype: None
:return: 无返回值。
"""
row_position = 0
try:
# 获取最后行数
row_position = self.rowCount()
# 插入最后一行
self.insertRow(row_position)
# 插入单元格数据
self._fill_row_data(row_position, data)
except Exception:
logger.exception(f"Error occurred while adding a new row at position {row_position}")
self.removeRow(row_position)
def _fill_row_data(self,
row_position: int,
data: List[List[str]]) -> None:
"""
填充指定行的数据。
:param row_position: 行位置
:param data: 行数据
:type row_position: int
:type data: List[List[str]]
:rtype: None
:return: 无返回值。
"""
for column, (display_text, user_data) in enumerate(data):
# 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole)
item = QTableWidgetItem(str(display_text))
# 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole)
item.setData(Qt.UserRole, user_data)
# 设置单元格不可编辑状态
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
# 正常表格插入方法
self.setItem(row_position, column, item)
@log_time
def apply_color_to_table(self, rows: List[int] = None) -> None:
"""
对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。
:param rows: 可选,要应用颜色的行号列表。
:type rows: List[int], optional
:rtype: None
:return: 无返回值。
"""
color_switch = self.config_manager.get_config_main().get('color_set', 'ON')
if color_switch == 'OFF':
return
if rows is None or not isinstance(rows, list):
rows = range(self.rowCount())
try:
for row in rows:
# 不给隐藏行设置颜色
if self.isRowHidden(row):
continue
self._process_row_for_color(row)
except Exception:
logger.exception("Exception in apply_color_to_table method")
self.status_updated.emit(self.lang['label_status_error'])
def _process_row_for_color(self, row: int) -> None:
"""
根据一致性、跳过状态和是否为空值给单行应用颜色。
:param row: 行号,对每行进行颜色处理。
:type row: int
:rtype: None
:return: 无返回值。
"""
consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)
skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)
# 忽略状态为是时设置颜色
if skip_data == 'yes':
self.apply_color(row, COLOR_SKIP)
return
# 根据一致性值设置颜色
if consistency_data == 'fully':
self.apply_color(row, COLOR_CONSISTENCY_FULLY)
elif consistency_data == 'partially':
self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY)
else: | self.apply_color(row, COLOR_DEFAULT) | 5 | 2023-11-07 01:02:38+00:00 | 12k |
pytorch-labs/ao | test/test.py | [
{
"identifier": "DynamicallyPerAxisQuantizedLinear",
"path": "torchao/quantization/dynamic_quant.py",
"snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetr... | import copy
import unittest
import torch
import torch.nn as nn
import os
from torch._inductor.utils import run_and_get_code
from torch._dynamo import config
from torch.ao.quantization import MinMaxObserver, QConfigMapping
from torchao.quantization.dynamic_quant import (
DynamicallyPerAxisQuantizedLinear,
)
from torchao.quantization.quant_api import (
apply_dynamic_quant,
apply_weight_only_int8_quant,
change_linear_weights_to_int8_dqtensors,
change_linear_weights_to_int8_woqtensors,
change_linear_weights_to_int4_woqtensors,
_replace_with_custom_fn_if_matches_filter,
)
from torchao.quantization.quant_primitives import (
dequantize_per_channel,
dequantize_per_tensor,
dynamically_quantize_per_channel,
dynamically_quantize_per_tensor,
quant_int8_dynamic_linear,
quant_int8_dynamic_per_token_linear,
quantize_activation_per_token_absmax,
safe_int_mm,
)
from torchao.quantization.smoothquant import (
get_scale,
smooth_fq_linear_to_inference,
SmoothFakeDynamicallyQuantizedLinear,
swap_linear_with_smooth_fq_linear,
)
from torchao.quantization.subclass import (
Int8DynamicallyQuantizedLinearWeight,
Int8WeightOnlyQuantizedLinearWeight,
Int4WeightOnlyQuantizedLinearWeight
)
from torchao.quantization.utils import (
_apply_logging_hook,
compute_error,
compute_error as SQNR,
_fqn_to_op_to_shape_to_count,
LoggingTensorMode,
)
from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx
from transformers import ( # type: ignore[import-untyped]
DistilBertModel,
DistilBertTokenizer,
) | 8,662 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# mypy: ignore-errors
torch.manual_seed(0)
config.cache_size_limit = 100
class SmoothquantUnitTest(unittest.TestCase):
# first, let's reproduce the graphic from the paper, Figure 4, to ensure
# we are calculating the scales correctly
def test_figure_4(self):
X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
X_mul_W = torch.matmul(X, W)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# mypy: ignore-errors
torch.manual_seed(0)
config.cache_size_limit = 100
class SmoothquantUnitTest(unittest.TestCase):
# first, let's reproduce the graphic from the paper, Figure 4, to ensure
# we are calculating the scales correctly
def test_figure_4(self):
X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
X_mul_W = torch.matmul(X, W)
| smoothquant_scale = get_scale( | 15 | 2023-11-03 21:27:36+00:00 | 12k |
google-research/semivl | third_party/unimatch/supervised.py | [
{
"identifier": "__version__",
"path": "version.py",
"snippet": ""
},
{
"identifier": "gen_code_archive",
"path": "utils/gen_code_archive.py",
"snippet": "def gen_code_archive(out_dir, file='code.tar.gz'):\n archive = os.path.join(out_dir, file)\n os.makedirs(os.path.dirname(archiv... | import argparse
import logging
import os
import pprint
import shutil
import uuid
import torch
import numpy as np
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import yaml
import mmseg
from version import __version__
from datetime import datetime
from utils.gen_code_archive import gen_code_archive
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from third_party.unimatch.dataset.semi import SemiDataset
from model.builder import build_model
from mmseg.core import build_optimizer
from experiments import get_git_revision
from datasets.classes import CLASSES
from third_party.unimatch.util.ohem import ProbOhemCrossEntropy2d
from third_party.unimatch.util.utils import count_params, AverageMeter, intersectionAndUnion, init_log
from third_party.unimatch.util.dist_helper import setup_distributed | 7,448 | else:
if mode == 'center_crop':
h, w = img.shape[-2:]
start_h, start_w = (h - cfg['crop_size']) // 2, (w - cfg['crop_size']) // 2
img = img[:, :, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
mask = mask[:, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
final = model(img)
pred = final.argmax(dim=1)
if return_logits:
return pred, final
else:
return pred
def evaluate(model, loader, mode, cfg):
model.eval()
assert mode in ['original', 'center_crop', 'padded_sliding_window', 'zegclip_sliding_window', 'sliding_window']
intersection_meter = AverageMeter()
union_meter = AverageMeter()
with torch.no_grad():
for img, mask, id in tqdm(loader, total=len(loader)):
img = img.cuda()
pred = predict(model, img, mask, mode, cfg)
intersection, union, target = \
intersectionAndUnion(pred.cpu().numpy(), mask.numpy(), cfg['nclass'], 255)
reduced_intersection = torch.from_numpy(intersection).cuda()
reduced_union = torch.from_numpy(union).cuda()
reduced_target = torch.from_numpy(target).cuda()
dist.all_reduce(reduced_intersection)
dist.all_reduce(reduced_union)
dist.all_reduce(reduced_target)
intersection_meter.update(reduced_intersection.cpu().numpy())
union_meter.update(reduced_union.cpu().numpy())
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) * 100.0
mIOU = np.mean(iou_class)
return mIOU, iou_class
def main():
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
rank, world_size = setup_distributed(port=args.port)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5]
run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-')
save_path = f'exp/exp-{cfg["exp"]}/{run_name}'
os.makedirs(save_path, exist_ok=True)
formatter = logging.Formatter(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')
fileHandler = logging.FileHandler(f'{save_path}/debug.log')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
all_args = {**cfg, **vars(args),
'labeled_id_path': labeled_id_path, 'unlabeled_id_path': unlabeled_id_path,
'ngpus': world_size, 'run_name': run_name, 'save_path': save_path,
'exec_git_rev': get_git_revision(), 'exec_version': __version__}
logger.info('{}\n'.format(pprint.pformat(all_args)))
writer = SummaryWriter(save_path)
shutil.copyfile(args.config, os.path.join(save_path, 'config.yaml'))
with open(os.path.join(save_path, 'all_args.yaml'), 'w') as f:
yaml.dump(all_args, f, default_flow_style=None, sort_keys=False, indent=2)
gen_code_archive(save_path)
cudnn.enabled = True
cudnn.benchmark = True
model = build_model(cfg)
if rank == 0:
logger.info(model)
logger.info('Total params: {:.1f}M\n'.format(count_params(model)))
if 'optimizer' not in cfg:
optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
{'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
'lr': cfg['lr'] * cfg['lr_multi']}], lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)
else:
optimizer = build_optimizer(model, cfg['optimizer'])
# print(len(optimizer.param_groups), 'param groups')
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# print(group['initial_lr'], group['lr'], group['weight_decay'])
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda(local_rank)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
output_device=local_rank, find_unused_parameters=('zegclip' in cfg['model']))
if cfg['criterion']['name'] == 'CELoss':
criterion = nn.CrossEntropyLoss(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'OHEM':
criterion = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'mmseg':
criterion = None
else:
raise NotImplementedError('%s criterion is not implemented' % cfg['criterion']['name'])
|
parser = argparse.ArgumentParser(description='Revisiting Weak-to-Strong Consistency in Semi-Supervised Semantic Segmentation')
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
def predict(model, img, mask, mode, cfg, return_logits=False):
if mode == 'padded_sliding_window':
grid = cfg['crop_size']
stride = cfg['stride']
if stride < 1:
stride = int(grid * stride)
b, _, h, w = img.shape
final = torch.zeros(b, cfg['nclass'], h, w).cuda()
row = 0
while row < h:
col = 0
while col < w:
y1 = row
y2 = min(h, row + grid)
x1 = col
x2 = min(w, col + grid)
crop_h = y2 - y1
crop_w = x2 - x1
# print(y1, y2, x1, x2, crop_h, crop_w)
cropped_img = torch.zeros((b, 3, grid, grid), device=img.device)
cropped_img[:, :, :crop_h, :crop_w] = img[:, :, y1: y2, x1: x2]
pred = model(cropped_img)
final[:, :, y1: y2, x1: x2] += pred.softmax(dim=1)[:, :, :crop_h, :crop_w]
col += stride
row += stride
pred = final.argmax(dim=1)
elif mode == 'zegclip_sliding_window':
h_stride, w_stride = cfg['stride'], cfg['stride']
h_crop, w_crop = cfg['crop_size'], cfg['crop_size']
batch_size, _, h_img, w_img = img.size()
num_classes = cfg['nclass']
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = model(crop_img)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
preds = preds / count_mat
final = mmseg.ops.resize(
preds,
size=mask.shape[-2:],
mode='bilinear',
align_corners=True,
warning=False)
pred = final.argmax(dim=1)
elif mode == 'sliding_window':
grid = cfg['crop_size']
b, _, h, w = img.shape
final = torch.zeros(b, cfg['nclass'], h, w).cuda()
row = 0
while row < h:
col = 0
while col < w:
pred = model(img[:, :, row: min(h, row + grid), col: min(w, col + grid)])
final[:, :, row: min(h, row + grid), col: min(w, col + grid)] += pred.softmax(dim=1)
col += int(grid * 2 / 3)
row += int(grid * 2 / 3)
pred = final.argmax(dim=1)
else:
if mode == 'center_crop':
h, w = img.shape[-2:]
start_h, start_w = (h - cfg['crop_size']) // 2, (w - cfg['crop_size']) // 2
img = img[:, :, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
mask = mask[:, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
final = model(img)
pred = final.argmax(dim=1)
if return_logits:
return pred, final
else:
return pred
def evaluate(model, loader, mode, cfg):
model.eval()
assert mode in ['original', 'center_crop', 'padded_sliding_window', 'zegclip_sliding_window', 'sliding_window']
intersection_meter = AverageMeter()
union_meter = AverageMeter()
with torch.no_grad():
for img, mask, id in tqdm(loader, total=len(loader)):
img = img.cuda()
pred = predict(model, img, mask, mode, cfg)
intersection, union, target = \
intersectionAndUnion(pred.cpu().numpy(), mask.numpy(), cfg['nclass'], 255)
reduced_intersection = torch.from_numpy(intersection).cuda()
reduced_union = torch.from_numpy(union).cuda()
reduced_target = torch.from_numpy(target).cuda()
dist.all_reduce(reduced_intersection)
dist.all_reduce(reduced_union)
dist.all_reduce(reduced_target)
intersection_meter.update(reduced_intersection.cpu().numpy())
union_meter.update(reduced_union.cpu().numpy())
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) * 100.0
mIOU = np.mean(iou_class)
return mIOU, iou_class
def main():
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
rank, world_size = setup_distributed(port=args.port)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5]
run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-')
save_path = f'exp/exp-{cfg["exp"]}/{run_name}'
os.makedirs(save_path, exist_ok=True)
formatter = logging.Formatter(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')
fileHandler = logging.FileHandler(f'{save_path}/debug.log')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
all_args = {**cfg, **vars(args),
'labeled_id_path': labeled_id_path, 'unlabeled_id_path': unlabeled_id_path,
'ngpus': world_size, 'run_name': run_name, 'save_path': save_path,
'exec_git_rev': get_git_revision(), 'exec_version': __version__}
logger.info('{}\n'.format(pprint.pformat(all_args)))
writer = SummaryWriter(save_path)
shutil.copyfile(args.config, os.path.join(save_path, 'config.yaml'))
with open(os.path.join(save_path, 'all_args.yaml'), 'w') as f:
yaml.dump(all_args, f, default_flow_style=None, sort_keys=False, indent=2)
gen_code_archive(save_path)
cudnn.enabled = True
cudnn.benchmark = True
model = build_model(cfg)
if rank == 0:
logger.info(model)
logger.info('Total params: {:.1f}M\n'.format(count_params(model)))
if 'optimizer' not in cfg:
optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
{'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
'lr': cfg['lr'] * cfg['lr_multi']}], lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)
else:
optimizer = build_optimizer(model, cfg['optimizer'])
# print(len(optimizer.param_groups), 'param groups')
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# print(group['initial_lr'], group['lr'], group['weight_decay'])
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda(local_rank)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
output_device=local_rank, find_unused_parameters=('zegclip' in cfg['model']))
if cfg['criterion']['name'] == 'CELoss':
criterion = nn.CrossEntropyLoss(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'OHEM':
criterion = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(local_rank)
elif cfg['criterion']['name'] == 'mmseg':
criterion = None
else:
raise NotImplementedError('%s criterion is not implemented' % cfg['criterion']['name'])
| trainset = SemiDataset(cfg, 'train_l', id_path=labeled_id_path) | 2 | 2023-11-02 14:49:38+00:00 | 12k |
intellerce/controlanimate | animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers... | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.models.attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from diffusers.utils import WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint
import safetensors | 8,195 | cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
dropout: float = 0.0,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads=64,
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
# self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# self.time_embedding = TimestepEmbedding(
# timestep_input_dim,
# time_embed_dim,
# act_fn=act_fn,
# post_act_fn=timestep_post_act,
# cond_proj_dim=time_cond_proj_dim,
# )
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
dropout: float = 0.0,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads=64,
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
# self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# self.time_embedding = TimestepEmbedding(
# timestep_input_dim,
# time_embed_dim,
# act_fn=act_fn,
# post_act_fn=timestep_post_act,
# cond_proj_dim=time_cond_proj_dim,
# )
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| down_block = get_down_block( | 5 | 2023-11-04 01:35:44+00:00 | 12k |
Zaczero/openstreetmap-ng | scripts/load_osm.py | [
{
"identifier": "Format06",
"path": "src/lib/format/format06.py",
"snippet": "class Format06:\n @staticmethod\n def _encode_tags(tags: dict) -> Sequence[dict] | dict:\n if format_is_json():\n return tags\n else:\n return tuple({'@k': k, '@v': v} for k, v in tags... | import sys
import anyio
import xmltodict
from datetime import datetime
from pathlib import Path
from shapely.geometry import Point
from src.lib.format.format06 import Format06
from src.lib_cython.xmltodict import XMLToDict
from src.models.db.element import Element
from src.models.db.element_node import ElementNode
from src.models.db.element_relation import ElementRelation
from src.models.db.element_way import ElementWay
from src.models.element_member import ElementMemberRef
from src.models.element_type import ElementType
from src.models.typed_element_ref import TypedElementRef | 10,295 |
async def main():
load_path = Path(sys.argv[1])
print(f'Loading {load_path} into database...')
def thread():
batch = []
total = 0
async def process_batch():
nonlocal batch
nonlocal total
batch_ = batch
batch = []
total += len(batch_)
print(f'Processing batch of {len(batch_)} elements (total {total})')
await Element._collection().bulk_write(batch_, ordered=False)
def item_callback(tree, body):
if not isinstance(body, dict):
body = {}
element_type, element = tree[-1]
if element_type not in ('node', 'way', 'relation'):
return True
base = {
'typed_id': int(element['id']),
'changeset_id': int(element['changeset']),
'created_at': datetime.fromisoformat(element['timestamp']),
'version': int(element['version']),
'visible': element.get('visible', True),
'tags': Format06._decode_tags_unsafe(body.get('tag', [])),
}
if element_type == 'node':
if 'lon' not in element:
lon = 0
lat = 0
else:
lon = float(element['lon'])
lat = float(element['lat'])
batch.append(ElementNode(**base, point=Point(lon, lat)).create_batch())
elif element_type == 'way':
if 'nd' not in body:
body['nd'] = []
batch.append(ElementWay(**base, nodes=tuple(n['@ref'] for n in body['nd'])).create_batch())
elif element_type == 'relation':
if 'member' not in body:
body['member'] = []
batch.append(
ElementRelation(
**base,
members=tuple(
|
async def main():
load_path = Path(sys.argv[1])
print(f'Loading {load_path} into database...')
def thread():
batch = []
total = 0
async def process_batch():
nonlocal batch
nonlocal total
batch_ = batch
batch = []
total += len(batch_)
print(f'Processing batch of {len(batch_)} elements (total {total})')
await Element._collection().bulk_write(batch_, ordered=False)
def item_callback(tree, body):
if not isinstance(body, dict):
body = {}
element_type, element = tree[-1]
if element_type not in ('node', 'way', 'relation'):
return True
base = {
'typed_id': int(element['id']),
'changeset_id': int(element['changeset']),
'created_at': datetime.fromisoformat(element['timestamp']),
'version': int(element['version']),
'visible': element.get('visible', True),
'tags': Format06._decode_tags_unsafe(body.get('tag', [])),
}
if element_type == 'node':
if 'lon' not in element:
lon = 0
lat = 0
else:
lon = float(element['lon'])
lat = float(element['lat'])
batch.append(ElementNode(**base, point=Point(lon, lat)).create_batch())
elif element_type == 'way':
if 'nd' not in body:
body['nd'] = []
batch.append(ElementWay(**base, nodes=tuple(n['@ref'] for n in body['nd'])).create_batch())
elif element_type == 'relation':
if 'member' not in body:
body['member'] = []
batch.append(
ElementRelation(
**base,
members=tuple( | ElementMemberRef( | 3 | 2023-11-04 01:12:13+00:00 | 12k |
codefuse-ai/Collinear-Constrained-Attention | model/llama/convert_llama_weights_to_hf.py | [
{
"identifier": "LlamaConfig",
"path": "model/llama/configuration_llama.py",
"snippet": "class LlamaConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified argumen... | import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from .configuration_llama import LlamaConfig
from .modeling_llama import LlamaForCausalLM
from .tokenization_llama import LlamaTokenizer
from tokenization_llama_fast import LlamaTokenizerFast | 9,724 | else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
state_dict = {
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
for i in range(num_shards)
],
dim=0,
).reshape(dim, dim)
)
state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
num_local_key_value_heads, dims_per_head, dim
)
for i in range(num_shards)
],
dim=0,
).reshape(key_value_dim, dim),
num_key_value_heads,
key_value_dim,
dim,
)
state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
num_local_key_value_heads, dims_per_head, dim
)
for i in range(num_shards)
],
dim=0,
).reshape(key_value_dim, dim)
state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
)
state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
)
state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
)
state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
)
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
state_dict = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
state_dict = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
}
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
multiple_of = params["multiple_of"] if "multiple_of" in params else 256
config = LlamaConfig(
hidden_size=dim,
intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
num_attention_heads=params["n_heads"],
num_hidden_layers=params["n_layers"],
rms_norm_eps=params["norm_eps"],
num_key_value_heads=num_key_value_heads,
)
config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model.")
model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format.")
model.save_pretrained(model_path, safe_serialization=safe_serialization)
shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
# Initialize the tokenizer based on the `spm` model
| # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
# from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
LlamaTokenizerFast = None
"""
Sample usage:
```
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
--input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
```
Thereafter, models can be loaded via:
```py
from transformers import LlamaForCausalLM, LlamaTokenizer
model = LlamaForCausalLM.from_pretrained("/output/path")
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
```
Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
"""
INTERMEDIATE_SIZE_MAP = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
NUM_SHARDS = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
with open(path, "r") as f:
return json.load(f)
def write_json(text, path):
with open(path, "w") as f:
json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
os.makedirs(model_path, exist_ok=True)
tmp_model_path = os.path.join(model_path, "tmp")
os.makedirs(tmp_model_path, exist_ok=True)
params = read_json(os.path.join(input_base_path, "params.json"))
num_shards = NUM_SHARDS[model_size]
n_layers = params["n_layers"]
n_heads = params["n_heads"]
n_heads_per_shard = n_heads // num_shards
dim = params["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
if "n_kv_heads" in params:
num_key_value_heads = params["n_kv_heads"] # for GQA / MQA
num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
key_value_dim = dim // num_key_value_heads
else: # compatibility with other checkpoints
num_key_value_heads = n_heads
num_local_key_value_heads = n_heads_per_shard
key_value_dim = dim
# permute for sliced rotary
def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
else:
# Sharded
loaded = [
torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
for i in range(num_shards)
]
param_count = 0
index_dict = {"weight_map": {}}
for layer_i in range(n_layers):
filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
state_dict = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"]
),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"]
),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
state_dict = {
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
for i in range(num_shards)
],
dim=0,
).reshape(dim, dim)
)
state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
num_local_key_value_heads, dims_per_head, dim
)
for i in range(num_shards)
],
dim=0,
).reshape(key_value_dim, dim),
num_key_value_heads,
key_value_dim,
dim,
)
state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
num_local_key_value_heads, dims_per_head, dim
)
for i in range(num_shards)
],
dim=0,
).reshape(key_value_dim, dim)
state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
)
state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
)
state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
)
state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
)
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
state_dict = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
state_dict = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
}
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
multiple_of = params["multiple_of"] if "multiple_of" in params else 256
config = LlamaConfig(
hidden_size=dim,
intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
num_attention_heads=params["n_heads"],
num_hidden_layers=params["n_layers"],
rms_norm_eps=params["norm_eps"],
num_key_value_heads=num_key_value_heads,
)
config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model.")
model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format.")
model.save_pretrained(model_path, safe_serialization=safe_serialization)
shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
# Initialize the tokenizer based on the `spm` model | tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast | 2 | 2023-11-02 01:37:01+00:00 | 12k |
bytedance/cryostar | projects/star/train_density.py | [
{
"identifier": "StarfileDataSet",
"path": "cryostar/utils/dataio.py",
"snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\"... | import os
import os.path as osp
import einops
import lightning.pytorch as pl
import numpy as np
import torch
from lightning.pytorch.strategies import DDPStrategy
from lightning.pytorch.utilities import rank_zero_only
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmengine import mkdir_or_exist
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig
from cryostar.nerf.volume_utils import ImplicitFourierVolume
from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d)
from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans
from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict)
from cryostar.utils.losses import calc_kl_loss
from cryostar.utils.ml_modules import VAEEncoder, reparameterize
from cryostar.utils.mrc_tools import save_mrc
from miscs import infer_ctf_params_from_config | 10,236 |
log_to_current = rank_zero_only(log_to_current)
TASK_NAME = "density"
class CryoModel(pl.LightningModule):
def __init__(self, cfg, dataset):
super().__init__()
self.cfg = cfg
self.dataset = dataset
self.z_dim = cfg.model.z_dim
self.history_saved_dirs = []
if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0:
if cfg.model.enc_space == "real":
self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
elif cfg.model.enc_space == "fourier":
self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
else:
raise NotImplementedError
if cfg.model.shift_method == "interp":
self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, )
log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.")
elif cfg.model.shift_method == "fft":
self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, )
else:
raise NotImplementedError
ctf_params = infer_ctf_params_from_config(cfg)
if cfg.model.ctf == "v1":
self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset))
log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.")
elif cfg.model.ctf == "v2":
|
log_to_current = rank_zero_only(log_to_current)
TASK_NAME = "density"
class CryoModel(pl.LightningModule):
def __init__(self, cfg, dataset):
super().__init__()
self.cfg = cfg
self.dataset = dataset
self.z_dim = cfg.model.z_dim
self.history_saved_dirs = []
if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0:
if cfg.model.enc_space == "real":
self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
elif cfg.model.enc_space == "fourier":
self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2,
cfg.model.hidden,
self.z_dim,
num_hidden_layers=4)
else:
raise NotImplementedError
if cfg.model.shift_method == "interp":
self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, )
log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.")
elif cfg.model.shift_method == "fft":
self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, )
else:
raise NotImplementedError
ctf_params = infer_ctf_params_from_config(cfg)
if cfg.model.ctf == "v1":
self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset))
log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.")
elif cfg.model.ctf == "v2": | self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) | 6 | 2023-11-06 07:15:26+00:00 | 12k |
xyongLu/SBCFormer | main.py | [
{
"identifier": "Mixup",
"path": "mixup.py",
"snippet": "class Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is a... | import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import utils
from pathlib import Path
from mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
from models import * | 7,208 | parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throught
parser.add_argument('--throughout', action='store_true', help='Perform throughout only')
return parser
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, args.nb_classes = build_dataset(is_train=False, args=args)
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
| # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
# from ptflops import get_model_complexity_info
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("running on {} device.".format(device))
def get_args_parser():
parser = argparse.ArgumentParser('SlenderViT training and evaluation script', add_help=False)
# Model parameters
parser.add_argument('--uni-note', default='', type=str, help='unique note on the name of model to train')
parser.add_argument('--model', default='SBCFormer_B', type=str, metavar='MODEL',
help='Name of model to train.')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--in-chans', type=int, default=3, help='the channel of inputs ')
parser.add_argument('--batch-size', default=30, type=int)
parser.add_argument('--drop', type=float, default=0., metavar='PCT', help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=False)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (defaudevice = torch.device(args.device)ult: None, no clipping)')
parser.add_argument('--clip-grad', type=float, default=5, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=2.5e-4, metavar='LR', help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throught
parser.add_argument('--throughout', action='store_true', help='Perform throughout only')
return parser
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, args.nb_classes = build_dataset(is_train=False, args=args)
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug: | sampler_train = RASampler( | 5 | 2023-11-06 03:31:47+00:00 | 12k |
zamaniamin/fastapi-shop | apps/products/tests/test_product_media.py | [
{
"identifier": "FakeUser",
"path": "apps/accounts/faker/data.py",
"snippet": "class FakeUser(BaseFakeAccount):\n\n @classmethod\n def populate_members(cls):\n \"\"\"\n Create an admin and a user.\n \"\"\"\n\n # --- admin ---\n user, access_token = FakeAccount.ve... | import asyncio
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from apps.accounts.faker.data import FakeUser
from apps.accounts.models import User
from apps.core.base_test_case import BaseTestCase
from apps.main import app
from apps.products.faker.data import FakeProduct, FakeMedia
from apps.products.services import ProductService
from config.database import DatabaseManager | 7,659 |
class ProductMediaTestBase(BaseTestCase):
product_endpoint = '/products/'
product_media_endpoint = '/products/media/'
# --- members ---
admin: User | None = None
admin_authorization = {}
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
DatabaseManager.create_test_database()
# --- create an admin ---
cls.admin, access_token = FakeUser.populate_admin()
cls.admin_authorization = {"Authorization": f"Bearer {access_token}"}
@classmethod
def teardown_class(cls):
DatabaseManager.drop_all_tables()
class TestCreateProductMedia(ProductMediaTestBase):
"""
Test create product-media on the multi scenario
"""
def test_create_product_media(self):
"""
Test create a product-media (images) for a product and attach them to that product (assuming valid data).
Test the File "type, size and url".
"""
# --- create a product ---
product_payload, product = FakeProduct.populate_product()
# --- upload files ----
file_paths = FakeMedia.populate_images_for_product()
files = [("x_files", open(file_path, "rb")) for file_path in file_paths]
media_payload = {
'alt': 'Test Alt Text'
}
# --- request ---
response = self.client.post(f"{self.product_endpoint}{product.id}/media/", data=media_payload, files=files,
headers=self.admin_authorization)
assert response.status_code == status.HTTP_201_CREATED
# --- response data ---
expected = response.json()
# --- media ---
assert "media" in expected
media_list = expected["media"]
assert isinstance(media_list, list)
for media in media_list:
assert media["media_id"] > 0
assert media["product_id"] == product.id
assert media["alt"] == media_payload['alt']
assert "src" in media and not None
assert media["type"] == 'jpg'
assert media["updated_at"] is None
self.assert_datetime_format(media['created_at'])
# --- test static file URL ---
url = f'/media/test{media_list[0]["src"].split("/media")[-1]}'
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
# test file size is not zero
assert len(response.content) > 0
class TestRetrieveProductMedia(ProductMediaTestBase):
"""
Test retrieve product-media on the multi scenario
"""
def test_retrieve_single_media(self):
"""
Test retrieve a single product image
"""
# --- create a product ---
payload, product = asyncio.run(FakeProduct.populate_product_with_media())
# --- get a media ---
|
class ProductMediaTestBase(BaseTestCase):
product_endpoint = '/products/'
product_media_endpoint = '/products/media/'
# --- members ---
admin: User | None = None
admin_authorization = {}
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
DatabaseManager.create_test_database()
# --- create an admin ---
cls.admin, access_token = FakeUser.populate_admin()
cls.admin_authorization = {"Authorization": f"Bearer {access_token}"}
@classmethod
def teardown_class(cls):
DatabaseManager.drop_all_tables()
class TestCreateProductMedia(ProductMediaTestBase):
"""
Test create product-media on the multi scenario
"""
def test_create_product_media(self):
"""
Test create a product-media (images) for a product and attach them to that product (assuming valid data).
Test the File "type, size and url".
"""
# --- create a product ---
product_payload, product = FakeProduct.populate_product()
# --- upload files ----
file_paths = FakeMedia.populate_images_for_product()
files = [("x_files", open(file_path, "rb")) for file_path in file_paths]
media_payload = {
'alt': 'Test Alt Text'
}
# --- request ---
response = self.client.post(f"{self.product_endpoint}{product.id}/media/", data=media_payload, files=files,
headers=self.admin_authorization)
assert response.status_code == status.HTTP_201_CREATED
# --- response data ---
expected = response.json()
# --- media ---
assert "media" in expected
media_list = expected["media"]
assert isinstance(media_list, list)
for media in media_list:
assert media["media_id"] > 0
assert media["product_id"] == product.id
assert media["alt"] == media_payload['alt']
assert "src" in media and not None
assert media["type"] == 'jpg'
assert media["updated_at"] is None
self.assert_datetime_format(media['created_at'])
# --- test static file URL ---
url = f'/media/test{media_list[0]["src"].split("/media")[-1]}'
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
# test file size is not zero
assert len(response.content) > 0
class TestRetrieveProductMedia(ProductMediaTestBase):
"""
Test retrieve product-media on the multi scenario
"""
def test_retrieve_single_media(self):
"""
Test retrieve a single product image
"""
# --- create a product ---
payload, product = asyncio.run(FakeProduct.populate_product_with_media())
# --- get a media --- | media = ProductService.retrieve_media_list(product.id)[0] | 6 | 2023-11-06 04:46:03+00:00 | 12k |
lukas-clarke/eight_sleep | custom_components/eight_sleep/pyEight/eight.py | [
{
"identifier": "NotAuthenticatedError",
"path": "custom_components/eight_sleep/pyEight/exceptions.py",
"snippet": "class NotAuthenticatedError(BaseEightSleepError):\n \"\"\"Exception for eight sleep authentication errors..\"\"\""
},
{
"identifier": "RequestError",
"path": "custom_compone... | import asyncio
import atexit
import pytz
import logging
import time
import httpx
from datetime import datetime
from typing import Any
from aiohttp.client import ClientError, ClientSession, ClientTimeout
from .constants import *
from .exceptions import NotAuthenticatedError, RequestError
from .user import EightUser
from .structs import Token | 9,451 | self.users: dict[str, EightUser] = {}
self._user_id: str | None = None
self._token: str | None = None
self._token_expiration: datetime | None = None
self._device_ids: list[str] = []
self._is_pod: bool = False
# Setup 10 element list
self._device_json_list: list[dict] = []
self._api_session = client_session
self._internal_session: bool = False
if check_auth:
self._get_auth()
# Stop on exit
atexit.register(self.at_exit)
def at_exit(self) -> None:
"""Run at exit."""
try:
loop = asyncio.get_running_loop()
asyncio.run_coroutine_threadsafe(self.stop(), loop).result()
except RuntimeError:
asyncio.run(self.stop())
@property
def token(self) -> str | None:
"""Return session token."""
return self._token
@property
def user_id(self) -> str | None:
"""Return user ID of the logged in user."""
return self._user_id
@property
def device_id(self) -> str | None:
"""Return devices id."""
return self._device_ids[0]
@property
def device_data(self) -> dict:
"""Return current raw device_data json."""
return self._device_json_list[0]
@property
def device_data_history(self) -> list[dict]:
"""Return full raw device_data json list."""
return self._device_json_list
@property
def need_priming(self) -> bool:
return self.device_data["needsPriming"]
@property
def is_priming(self) -> bool:
return self.device_data["priming"]
@property
def has_water(self) -> bool:
return self.device_data["hasWater"]
@property
def last_prime(self):
return self.convert_string_to_datetime(self.device_data["lastPrime"])
@property
def is_pod(self) -> bool:
"""Return if device is a POD."""
return self._is_pod
def convert_string_to_datetime(self, datetime_str):
datetime_str = str(datetime_str).strip()
# Convert string to datetime object.
try:
# Try to parse the first format
datetime_object = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
try:
# Try to parse the second format
datetime_object = datetime.strptime(
datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ"
)
except ValueError:
# Handle if neither format is matched
raise ValueError(f"Unsupported date string format for {datetime_str}")
# Set the timezone to UTC
utc_timezone = pytz.UTC
datetime_object_utc = datetime_object.replace(tzinfo=utc_timezone)
# Set the timezone to a specific timezone
timezone = pytz.timezone(self.timezone)
return datetime_object_utc.astimezone(timezone)
async def _get_auth(self) -> Token:
data = {
"client_id": self._client_id,
"client_secret": self._client_secret,
"grant_type": "password",
"username": self._email,
"password": self._password,
}
async with httpx.AsyncClient() as client:
response = await client.post(
AUTH_URL,
headers=DEFAULT_AUTH_HEADERS,
json=data,
timeout=DEFAULT_TIMEOUT,
)
if response.status_code == 200:
access_token_str = response.json()["access_token"]
expiration_seconds_int = (
float(response.json()["expires_in"]) + time.time()
)
main_id = response.json()["userId"]
return Token(access_token_str, expiration_seconds_int, main_id)
else:
| """
pyeight.eight
~~~~~~~~~~~~~~~~~~~~
Provides api for Eight Sleep
Copyright (c) 2022-2023 <https://github.com/lukas-clarke/pyEight>
Licensed under the MIT license.
"""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
CLIENT_TIMEOUT = ClientTimeout(total=DEFAULT_TIMEOUT)
class EightSleep:
"""Eight sleep API object."""
def __init__(
self,
email: str,
password: str,
timezone: str,
client_id: str = None,
client_secret: str = None,
client_session: ClientSession | None = None,
check_auth: bool = False,
) -> None:
"""Initialize eight sleep class."""
self._email = email
self._password = password
# If client_id isn't set, use the default value
if not client_id:
client_id = "0894c7f33bb94800a03f1f4df13a4f38"
self._client_id = client_id
# client_secret isn't required for current Eight Sleep API auth
# but can't be empty value, so setting random string if not set
if not client_secret:
client_secret = "ASDF"
self._client_secret = client_secret
self.timezone = timezone
self.users: dict[str, EightUser] = {}
self._user_id: str | None = None
self._token: str | None = None
self._token_expiration: datetime | None = None
self._device_ids: list[str] = []
self._is_pod: bool = False
# Setup 10 element list
self._device_json_list: list[dict] = []
self._api_session = client_session
self._internal_session: bool = False
if check_auth:
self._get_auth()
# Stop on exit
atexit.register(self.at_exit)
def at_exit(self) -> None:
"""Run at exit."""
try:
loop = asyncio.get_running_loop()
asyncio.run_coroutine_threadsafe(self.stop(), loop).result()
except RuntimeError:
asyncio.run(self.stop())
@property
def token(self) -> str | None:
"""Return session token."""
return self._token
@property
def user_id(self) -> str | None:
"""Return user ID of the logged in user."""
return self._user_id
@property
def device_id(self) -> str | None:
"""Return devices id."""
return self._device_ids[0]
@property
def device_data(self) -> dict:
"""Return current raw device_data json."""
return self._device_json_list[0]
@property
def device_data_history(self) -> list[dict]:
"""Return full raw device_data json list."""
return self._device_json_list
@property
def need_priming(self) -> bool:
return self.device_data["needsPriming"]
@property
def is_priming(self) -> bool:
return self.device_data["priming"]
@property
def has_water(self) -> bool:
return self.device_data["hasWater"]
@property
def last_prime(self):
return self.convert_string_to_datetime(self.device_data["lastPrime"])
@property
def is_pod(self) -> bool:
"""Return if device is a POD."""
return self._is_pod
def convert_string_to_datetime(self, datetime_str):
datetime_str = str(datetime_str).strip()
# Convert string to datetime object.
try:
# Try to parse the first format
datetime_object = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
try:
# Try to parse the second format
datetime_object = datetime.strptime(
datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ"
)
except ValueError:
# Handle if neither format is matched
raise ValueError(f"Unsupported date string format for {datetime_str}")
# Set the timezone to UTC
utc_timezone = pytz.UTC
datetime_object_utc = datetime_object.replace(tzinfo=utc_timezone)
# Set the timezone to a specific timezone
timezone = pytz.timezone(self.timezone)
return datetime_object_utc.astimezone(timezone)
async def _get_auth(self) -> Token:
data = {
"client_id": self._client_id,
"client_secret": self._client_secret,
"grant_type": "password",
"username": self._email,
"password": self._password,
}
async with httpx.AsyncClient() as client:
response = await client.post(
AUTH_URL,
headers=DEFAULT_AUTH_HEADERS,
json=data,
timeout=DEFAULT_TIMEOUT,
)
if response.status_code == 200:
access_token_str = response.json()["access_token"]
expiration_seconds_int = (
float(response.json()["expires_in"]) + time.time()
)
main_id = response.json()["userId"]
return Token(access_token_str, expiration_seconds_int, main_id)
else: | raise RequestError( | 1 | 2023-11-01 16:15:52+00:00 | 12k |
gickowtf/pixoo-homeassistant | custom_components/divoom_pixoo/pixoo64/_pixoo.py | [
{
"identifier": "Palette",
"path": "custom_components/divoom_pixoo/pixoo64/_colors.py",
"snippet": "class Palette:\n BLACK = COLOR_BLACK\n WHITE = COLOR_WHITE"
},
{
"identifier": "retrieve_glyph",
"path": "custom_components/divoom_pixoo/pixoo64/_font.py",
"snippet": "def retrieve_g... | import base64
import json
import requests
from enum import IntEnum
from PIL import Image, ImageOps
from ._colors import Palette
from ._font import retrieve_glyph, FONT_GICKO, FONT_PICO_8 | 10,788 | def draw_filled_rectangle_from_top_left_to_bottom_right_rgb(self,
top_left_x=0,
top_left_y=0,
bottom_right_x=1,
bottom_right_y=1,
r=0, g=0, b=0):
self.draw_filled_rectangle((top_left_x, top_left_y),
(bottom_right_x, bottom_right_y), (r, g, b))
def draw_image(self, image_path_or_object, xy=(0, 0),
image_resample_mode=ImageResampleMode.PIXEL_ART,
pad_resample=False):
image = image_path_or_object if isinstance(image_path_or_object,
Image.Image) else Image.open(
image_path_or_object)
size = image.size
width = size[0]
height = size[1]
# See if it needs to be scaled/resized to fit the display
if width > self.size or height > self.size:
if pad_resample:
image = ImageOps.pad(image, (self.size, self.size),
image_resample_mode)
else:
image.thumbnail((self.size, self.size), image_resample_mode)
if self.debug:
print(
f'[.] Resized image to fit on screen (saving aspect ratio): "{image_path_or_object}" ({width}, {height}) '
f'-> ({image.size[0]}, {image.size[1]})')
# Convert the loaded image to RGB
rgb_image = image.convert('RGB')
# Iterate over all pixels in the image that are left and buffer them
for y in range(image.size[1]):
for x in range(image.size[0]):
location = (x, y)
placed_x = x + xy[0]
if self.size - 1 < placed_x or placed_x < 0:
continue
placed_y = y + xy[1]
if self.size - 1 < placed_y or placed_y < 0:
continue
self.draw_pixel((placed_x, placed_y),
rgb_image.getpixel(location))
def draw_image_at_location(self, image_path_or_object, x, y,
image_resample_mode=ImageResampleMode.PIXEL_ART):
self.draw_image(image_path_or_object, (x, y), image_resample_mode)
def draw_line(self, start_xy, stop_xy, rgb=Palette.WHITE):
line = set()
# Calculate the amount of steps needed between the points to draw a nice line
amount_of_steps = minimum_amount_of_steps(start_xy, stop_xy)
# Iterate over them and create a nice set of pixels
for step in range(amount_of_steps):
if amount_of_steps == 0:
interpolant = 0
else:
interpolant = step / amount_of_steps
# Add a pixel as a rounded location
line.add(
round_location(lerp_location(start_xy, stop_xy, interpolant)))
# Draw the actual pixel line
for pixel in line:
self.draw_pixel(pixel, rgb)
def draw_line_from_start_to_stop_rgb(self, start_x, start_y, stop_x, stop_y,
r=255, g=255, b=255):
self.draw_line((start_x, start_y), (stop_x, stop_y), (r, g, b))
def draw_pixel(self, xy, rgb):
# If it's not on the screen, we're not going to bother
if xy[0] < 0 or xy[0] >= self.size or xy[1] < 0 or xy[1] >= self.size:
if self.debug:
limit = self.size - 1
print(
f'[!] Invalid coordinates given: ({xy[0]}, {xy[1]}) (maximum coordinates are ({limit}, {limit})')
return
# Calculate the index
index = xy[0] + (xy[1] * self.size)
# Color it
self.draw_pixel_at_index(index, rgb)
def draw_pixel_at_index(self, index, rgb):
# Validate the index
if index < 0 or index >= self.pixel_count:
if self.debug:
print(f'[!] Invalid index given: {index} (maximum index is {self.pixel_count - 1})')
return
# Clamp the color, just to be safe
rgb = clamp_color(rgb)
# Move to place in array
index = index * 3
self.__buffer[index] = rgb[0]
self.__buffer[index + 1] = rgb[1]
self.__buffer[index + 2] = rgb[2]
def draw_pixel_at_index_rgb(self, index, r, g, b):
self.draw_pixel_at_index(index, (r, g, b))
def draw_pixel_at_location_rgb(self, x, y, r, g, b):
self.draw_pixel((x, y), (r, g, b))
def draw_character(self, character, xy=(0, 0), rgb=Palette.WHITE, font=None):
if font is None:
font = FONT_PICO_8
|
def clamp(value, minimum=0, maximum=255):
    """Constrain *value* to the inclusive range [minimum, maximum]."""
    if value > maximum:
        return maximum
    return minimum if value < minimum else value
def clamp_color(rgb):
    """Clamp the first three channels of *rgb* to 0..255 and return a tuple."""
    red = clamp(rgb[0])
    green = clamp(rgb[1])
    blue = clamp(rgb[2])
    return red, green, blue
def lerp(start, end, interpolant):
    """Linearly interpolate from *start* toward *end* by *interpolant*."""
    delta = end - start
    return start + interpolant * delta
def lerp_location(xy1, xy2, interpolant):
    """Interpolate each coordinate of two (x, y) pairs independently."""
    x = lerp(xy1[0], xy2[0], interpolant)
    y = lerp(xy1[1], xy2[1], interpolant)
    return x, y
def minimum_amount_of_steps(xy1, xy2):
    """Chebyshev distance between two points: the longer axis delta."""
    dx = abs(xy1[0] - xy2[0])
    dy = abs(xy1[1] - xy2[1])
    return dx if dx > dy else dy
def rgb_to_hex_color(rgb):
    """Format an (r, g, b) tuple as an uppercase '#RRGGBB' string."""
    return '#{:0>2X}{:0>2X}{:0>2X}'.format(rgb[0], rgb[1], rgb[2])
def round_location(xy):
    """Round both coordinates to the nearest integer (banker's rounding)."""
    x = round(xy[0])
    y = round(xy[1])
    return x, y
class Channel(IntEnum):
    """Top-level display channels the device can be switched to."""
    FACES = 0
    CLOUD = 1
    VISUALIZER = 2
    CUSTOM = 3
class ImageResampleMode(IntEnum):
    """Resampling filter used when scaling images to fit the display."""
    # Nearest-neighbour keeps hard pixel edges (good for pixel art).
    PIXEL_ART = Image.NEAREST
class TextScrollDirection(IntEnum):
    """Direction in which scrolling text moves across the display."""
    LEFT = 0
    RIGHT = 1
class Pixoo:
    """Drawing surface for a Pixoo LED-matrix device reachable over HTTP."""
    # Flat row-major frame buffer: three ints (r, g, b) per pixel.
    # NOTE(review): mutable class-level default — presumably rebound per
    # instance by fill(); confirm.
    __buffer = []
    # Number of buffers pushed to the device so far.
    __buffers_send = 0
    # Device-side command counter (restored by __load_counter).
    __counter = 0
    # Once __counter exceeds this, the connection is refreshed.
    __refresh_counter_limit = 32
def __init__(self, address, size=64, debug=False, refresh_connection_automatically=True):
assert size in [16, 32, 64], \
'Invalid screen size in pixels given. ' \
'Valid options are 16, 32, and 64'
self.refresh_connection_automatically = refresh_connection_automatically
self.address = address
self.debug = debug
self.size = size
# Total number of pixels
self.pixel_count = self.size * self.size
# Generate URL
self.__url = 'http://{0}/post'.format(address)
# Prefill the buffer
self.fill()
# Retrieve the counter
self.__load_counter()
# Resetting if needed
if self.refresh_connection_automatically and self.__counter > self.__refresh_counter_limit:
self.__reset_counter()
def clear(self, rgb: object = Palette.BLACK) -> object:
self.fill(rgb)
def clear_rgb(self, r, g, b):
self.fill_rgb(r, g, b)
def draw_character_at_location_rgb(self, character, x=0, y=0, r=255, g=255,
b=255):
self.draw_character(character, (x, y), (r, g, b))
def draw_filled_rectangle(self, top_left_xy=(0, 0), bottom_right_xy=(1, 1),
rgb=Palette.BLACK):
for y in range(top_left_xy[1], bottom_right_xy[1] + 1):
for x in range(top_left_xy[0], bottom_right_xy[0] + 1):
self.draw_pixel((x, y), rgb)
def draw_filled_rectangle_from_top_left_to_bottom_right_rgb(self,
top_left_x=0,
top_left_y=0,
bottom_right_x=1,
bottom_right_y=1,
r=0, g=0, b=0):
self.draw_filled_rectangle((top_left_x, top_left_y),
(bottom_right_x, bottom_right_y), (r, g, b))
def draw_image(self, image_path_or_object, xy=(0, 0),
image_resample_mode=ImageResampleMode.PIXEL_ART,
pad_resample=False):
image = image_path_or_object if isinstance(image_path_or_object,
Image.Image) else Image.open(
image_path_or_object)
size = image.size
width = size[0]
height = size[1]
# See if it needs to be scaled/resized to fit the display
if width > self.size or height > self.size:
if pad_resample:
image = ImageOps.pad(image, (self.size, self.size),
image_resample_mode)
else:
image.thumbnail((self.size, self.size), image_resample_mode)
if self.debug:
print(
f'[.] Resized image to fit on screen (saving aspect ratio): "{image_path_or_object}" ({width}, {height}) '
f'-> ({image.size[0]}, {image.size[1]})')
# Convert the loaded image to RGB
rgb_image = image.convert('RGB')
# Iterate over all pixels in the image that are left and buffer them
for y in range(image.size[1]):
for x in range(image.size[0]):
location = (x, y)
placed_x = x + xy[0]
if self.size - 1 < placed_x or placed_x < 0:
continue
placed_y = y + xy[1]
if self.size - 1 < placed_y or placed_y < 0:
continue
self.draw_pixel((placed_x, placed_y),
rgb_image.getpixel(location))
def draw_image_at_location(self, image_path_or_object, x, y,
image_resample_mode=ImageResampleMode.PIXEL_ART):
self.draw_image(image_path_or_object, (x, y), image_resample_mode)
def draw_line(self, start_xy, stop_xy, rgb=Palette.WHITE):
line = set()
# Calculate the amount of steps needed between the points to draw a nice line
amount_of_steps = minimum_amount_of_steps(start_xy, stop_xy)
# Iterate over them and create a nice set of pixels
for step in range(amount_of_steps):
if amount_of_steps == 0:
interpolant = 0
else:
interpolant = step / amount_of_steps
# Add a pixel as a rounded location
line.add(
round_location(lerp_location(start_xy, stop_xy, interpolant)))
# Draw the actual pixel line
for pixel in line:
self.draw_pixel(pixel, rgb)
def draw_line_from_start_to_stop_rgb(self, start_x, start_y, stop_x, stop_y,
r=255, g=255, b=255):
self.draw_line((start_x, start_y), (stop_x, stop_y), (r, g, b))
def draw_pixel(self, xy, rgb):
# If it's not on the screen, we're not going to bother
if xy[0] < 0 or xy[0] >= self.size or xy[1] < 0 or xy[1] >= self.size:
if self.debug:
limit = self.size - 1
print(
f'[!] Invalid coordinates given: ({xy[0]}, {xy[1]}) (maximum coordinates are ({limit}, {limit})')
return
# Calculate the index
index = xy[0] + (xy[1] * self.size)
# Color it
self.draw_pixel_at_index(index, rgb)
def draw_pixel_at_index(self, index, rgb):
# Validate the index
if index < 0 or index >= self.pixel_count:
if self.debug:
print(f'[!] Invalid index given: {index} (maximum index is {self.pixel_count - 1})')
return
# Clamp the color, just to be safe
rgb = clamp_color(rgb)
# Move to place in array
index = index * 3
self.__buffer[index] = rgb[0]
self.__buffer[index + 1] = rgb[1]
self.__buffer[index + 2] = rgb[2]
def draw_pixel_at_index_rgb(self, index, r, g, b):
self.draw_pixel_at_index(index, (r, g, b))
def draw_pixel_at_location_rgb(self, x, y, r, g, b):
self.draw_pixel((x, y), (r, g, b))
def draw_character(self, character, xy=(0, 0), rgb=Palette.WHITE, font=None):
if font is None:
font = FONT_PICO_8 | matrix = retrieve_glyph(character, font) | 1 | 2023-11-05 19:16:34+00:00 | 12k |
jkulhanek/nerfbaselines | nerfbaselines/datasets/colmap.py | [
{
"identifier": "Dataset",
"path": "nerfbaselines/types.py",
"snippet": "NB_PREFIX = os.path.expanduser(os.environ.get(\"NB_PREFIX\", \"~/.cache/nerfbaselines\"))\nclass Dataset:\nclass CurrentProgress:\n class RenderOutput(TypedDict):\nclass MethodInfo:\nclass Method(Protocol):\nclass RayMethod(Meth... | import typing
import logging
import numpy as np
from collections import OrderedDict
from pathlib import Path
from typing import Tuple, Optional, Dict
from ..types import Dataset, DatasetFeature, FrozenSet
from ..utils import Indices
from ..cameras import CameraModel, Cameras
from ._colmap_utils import read_cameras_binary, read_images_binary, read_points3D_binary, qvec2rotmat
from ._colmap_utils import read_cameras_text, read_images_text, read_points3D_text, Image, Camera, Point3D
from ._common import DatasetNotFoundError, padded_stack | 7,315 | # du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["k1"] = float(camera_params[4])
out["k2"] = float(camera_params[5])
out["k3"] = float(camera_params[6])
out["k4"] = float(camera_params[7])
camera_model = CameraModel.OPENCV_FISHEYE
elif camera.model == "FULL_OPENCV":
# fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
# u2 = u ** 2
# uv = u * v
# v2 = v ** 2
# r2 = u2 + v2
# r4 = r2 * r2
# r6 = r4 * r2
# radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) /
# (1 + k4 * r2 + k5 * r4 + k6 * r6)
# du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u
# dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["k1"] = float(camera_params[4])
out["k2"] = float(camera_params[5])
out["p1"] = float(camera_params[6])
out["p2"] = float(camera_params[7])
out["k3"] = float(camera_params[8])
out["k4"] = float(camera_params[9])
out["k5"] = float(camera_params[10])
out["k6"] = float(camera_params[11])
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
elif camera.model == "FOV":
# fx, fy, cx, cy, omega
fl_x = float(camera_params[0])
fl_y = float(camera_params[1])
cx = float(camera_params[2])
cy = float(camera_params[3])
out["omega"] = float(camera_params[4])
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
elif camera.model == "SIMPLE_RADIAL_FISHEYE":
# f, cx, cy, k
# r = sqrt(u ** 2 + v ** 2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# thetad = theta * (1 + k * theta2)
# du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
camera_model = CameraModel.OPENCV_FISHEYE
elif camera.model == "RADIAL_FISHEYE":
# f, cx, cy, k1, k2
# r = sqrt(u ** 2 + v ** 2)
# if r > eps:
# theta = atan(r)
# theta2 = theta ** 2
# theta4 = theta2 ** 2
# thetad = theta * (1 + k * theta2)
# thetad = theta * (1 + k1 * theta2 + k2 * theta4)
# du = u * thetad / r - u;
# dv = v * thetad / r - v;
# else:
# du = dv = 0
fl_x = float(camera_params[0])
fl_y = float(camera_params[0])
cx = float(camera_params[1])
cy = float(camera_params[2])
out["k1"] = float(camera_params[3])
out["k2"] = float(camera_params[4])
out["k3"] = 0.0
out["k4"] = 0.0
camera_model = CameraModel.OPENCV_FISHEYE
else:
# THIN_PRISM_FISHEYE not supported!
raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
image_width: int = camera.width
image_height: int = camera.height
intrinsics = np.array([fl_x, fl_y, cx, cy], dtype=np.float32) / float(image_width)
distortion_params = np.array([out.get(k, 0.0) for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32)
return intrinsics, camera_model.value, distortion_params, (image_width, image_height)
def load_colmap_dataset(path: Path, images_path: Optional[Path] = None, split: Optional[str] = None, test_indices: Optional[Indices] = None, features: Optional[FrozenSet[DatasetFeature]] = None):
if features is None:
features = typing.cast(FrozenSet[DatasetFeature], {})
load_points = "points3D_xyz" in features or "points3D_rgb" in features
if split:
assert split in {"train", "test"}
# Load COLMAP dataset
colmap_path = path / "sparse" / "0"
if images_path is None:
images_path = Path("images")
images_path = path / images_path
if not colmap_path.exists():
raise DatasetNotFoundError("Missing 'sparse/0' folder in COLMAP dataset")
if not (colmap_path / "cameras.bin").exists() and not (colmap_path / "cameras.txt").exists():
raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset")
if not images_path.exists():
raise DatasetNotFoundError("Missing 'images' folder in COLMAP dataset")
if (colmap_path / "cameras.bin").exists():
cameras = read_cameras_binary(colmap_path / "cameras.bin")
elif (colmap_path / "cameras.txt").exists():
|
def _parse_colmap_camera_params(camera: Camera) -> Tuple[np.ndarray, int, np.ndarray, Tuple[int, int]]:
    """Convert a COLMAP camera record into normalized intrinsics + distortion.

    Args:
        camera: COLMAP camera (``model``, ``params``, ``width``, ``height``).

    Returns:
        ``(intrinsics, camera_model_value, distortion_params, (width, height))``
        where ``intrinsics`` is ``[fx, fy, cx, cy]`` divided by the image width
        and ``distortion_params`` packs ``(k1, k2, p1, p2, k3, k4)`` with zeros
        for parameters the model does not define.

    Raises:
        NotImplementedError: for COLMAP models with no supported mapping
            (FULL_OPENCV, FOV, THIN_PRISM_FISHEYE, ...).
    """
    # Parameters match https://github.com/colmap/colmap/blob/dev/src/base/camera_models.h
    out = OrderedDict()  # Default in Python 3.7+
    camera_params = camera.params
    camera_model: CameraModel
    if camera.model == "SIMPLE_PINHOLE":
        # f, cx, cy — single shared focal length, no distortion
        # du = 0
        # dv = 0
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[0])
        cx = float(camera_params[1])
        cy = float(camera_params[2])
        camera_model = CameraModel.PINHOLE
    elif camera.model == "PINHOLE":
        # fx, fy, cx, cy — no distortion
        # du = 0
        # dv = 0
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[1])
        cx = float(camera_params[2])
        cy = float(camera_params[3])
        camera_model = CameraModel.PINHOLE
    elif camera.model == "SIMPLE_RADIAL":
        # f, cx, cy, k
        # r2 = u**2 + v**2;
        # radial = k * r2
        # du = u * radial
        # dv = v * radial
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[0])
        cx = float(camera_params[1])
        cy = float(camera_params[2])
        out["k1"] = float(camera_params[3])
        camera_model = CameraModel.OPENCV
    elif camera.model == "RADIAL":
        # f, cx, cy, k1, k2
        # r2 = u**2 + v**2;
        # radial = k1 * r2 + k2 * r2 ** 2
        # du = u * radial
        # dv = v * radial
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[0])
        cx = float(camera_params[1])
        cy = float(camera_params[2])
        out["k1"] = float(camera_params[3])
        out["k2"] = float(camera_params[4])
        camera_model = CameraModel.OPENCV
    elif camera.model == "OPENCV":
        # fx, fy, cx, cy, k1, k2, p1, p2
        # uv = u * v;
        # r2 = u**2 + v**2
        # radial = k1 * r2 + k2 * r2 ** 2
        # du = u * radial + 2 * p1 * u*v + p2 * (r2 + 2 * u**2)
        # dv = v * radial + 2 * p2 * u*v + p1 * (r2 + 2 * v**2)
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[1])
        cx = float(camera_params[2])
        cy = float(camera_params[3])
        out["k1"] = float(camera_params[4])
        out["k2"] = float(camera_params[5])
        out["p1"] = float(camera_params[6])
        out["p2"] = float(camera_params[7])
        camera_model = CameraModel.OPENCV
    elif camera.model == "OPENCV_FISHEYE":
        # fx, fy, cx, cy, k1, k2, k3, k4
        # r = sqrt(u**2 + v**2)
        # if r > eps:
        #     theta = atan(r)
        #     theta2 = theta ** 2
        #     theta4 = theta2 ** 2
        #     theta6 = theta4 * theta2
        #     theta8 = theta4 ** 2
        #     thetad = theta * (1 + k1 * theta2 + k2 * theta4 + k3 * theta6 + k4 * theta8)
        #     du = u * thetad / r - u;
        #     dv = v * thetad / r - v;
        # else:
        #     du = dv = 0
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[1])
        cx = float(camera_params[2])
        cy = float(camera_params[3])
        out["k1"] = float(camera_params[4])
        out["k2"] = float(camera_params[5])
        out["k3"] = float(camera_params[6])
        out["k4"] = float(camera_params[7])
        camera_model = CameraModel.OPENCV_FISHEYE
    elif camera.model == "FULL_OPENCV":
        # fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
        # u2 = u ** 2
        # uv = u * v
        # v2 = v ** 2
        # r2 = u2 + v2
        # r4 = r2 * r2
        # r6 = r4 * r2
        # radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) /
        #          (1 + k4 * r2 + k5 * r4 + k6 * r6)
        # du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u
        # dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v
        # Parameters are extracted but the model has no supported mapping,
        # so this branch ends in NotImplementedError.
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[1])
        cx = float(camera_params[2])
        cy = float(camera_params[3])
        out["k1"] = float(camera_params[4])
        out["k2"] = float(camera_params[5])
        out["p1"] = float(camera_params[6])
        out["p2"] = float(camera_params[7])
        out["k3"] = float(camera_params[8])
        out["k4"] = float(camera_params[9])
        out["k5"] = float(camera_params[10])
        out["k6"] = float(camera_params[11])
        raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
    elif camera.model == "FOV":
        # fx, fy, cx, cy, omega — unsupported, ends in NotImplementedError
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[1])
        cx = float(camera_params[2])
        cy = float(camera_params[3])
        out["omega"] = float(camera_params[4])
        raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
    elif camera.model == "SIMPLE_RADIAL_FISHEYE":
        # f, cx, cy, k
        # r = sqrt(u ** 2 + v ** 2)
        # if r > eps:
        #     theta = atan(r)
        #     theta2 = theta ** 2
        #     thetad = theta * (1 + k * theta2)
        #     du = u * thetad / r - u;
        #     dv = v * thetad / r - v;
        # else:
        #     du = dv = 0
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[0])
        cx = float(camera_params[1])
        cy = float(camera_params[2])
        out["k1"] = float(camera_params[3])
        camera_model = CameraModel.OPENCV_FISHEYE
    elif camera.model == "RADIAL_FISHEYE":
        # f, cx, cy, k1, k2
        # r = sqrt(u ** 2 + v ** 2)
        # if r > eps:
        #     theta = atan(r)
        #     theta2 = theta ** 2
        #     theta4 = theta2 ** 2
        #     thetad = theta * (1 + k1 * theta2 + k2 * theta4)
        #     du = u * thetad / r - u;
        #     dv = v * thetad / r - v;
        # else:
        #     du = dv = 0
        fl_x = float(camera_params[0])
        fl_y = float(camera_params[0])
        cx = float(camera_params[1])
        cy = float(camera_params[2])
        out["k1"] = float(camera_params[3])
        out["k2"] = float(camera_params[4])
        # Higher-order fisheye terms are not defined by this model
        out["k3"] = 0.0
        out["k4"] = 0.0
        camera_model = CameraModel.OPENCV_FISHEYE
    else:
        # THIN_PRISM_FISHEYE not supported!
        raise NotImplementedError(f"{camera.model} camera model is not supported yet!")
    image_width: int = camera.width
    image_height: int = camera.height
    # NOTE(review): both axes are normalized by the image *width* only —
    # appears to be the project convention; confirm against the consumers.
    intrinsics = np.array([fl_x, fl_y, cx, cy], dtype=np.float32) / float(image_width)
    distortion_params = np.array([out.get(k, 0.0) for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32)
    return intrinsics, camera_model.value, distortion_params, (image_width, image_height)
def load_colmap_dataset(path: Path, images_path: Optional[Path] = None, split: Optional[str] = None, test_indices: Optional[Indices] = None, features: Optional[FrozenSet[DatasetFeature]] = None):
if features is None:
features = typing.cast(FrozenSet[DatasetFeature], {})
load_points = "points3D_xyz" in features or "points3D_rgb" in features
if split:
assert split in {"train", "test"}
# Load COLMAP dataset
colmap_path = path / "sparse" / "0"
if images_path is None:
images_path = Path("images")
images_path = path / images_path
if not colmap_path.exists():
raise DatasetNotFoundError("Missing 'sparse/0' folder in COLMAP dataset")
if not (colmap_path / "cameras.bin").exists() and not (colmap_path / "cameras.txt").exists():
raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset")
if not images_path.exists():
raise DatasetNotFoundError("Missing 'images' folder in COLMAP dataset")
if (colmap_path / "cameras.bin").exists():
cameras = read_cameras_binary(colmap_path / "cameras.bin")
elif (colmap_path / "cameras.txt").exists(): | cameras = read_cameras_text(colmap_path / "cameras.txt") | 8 | 2023-11-07 20:22:35+00:00 | 12k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.