repo_name
stringlengths
7
71
file_path
stringlengths
5
118
context
list
import_statement
stringlengths
45
12.5k
token_num
int64
641
99.4k
cropped_code
stringlengths
44
17k
all_code
stringlengths
43
754k
next_line
stringlengths
2
330
gold_snippet_index
int64
0
68
created_at
stringlengths
25
25
level
stringclasses
9 values
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/vm/ark_dpa_vm_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_...
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.common import ArkProtocolType, ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.vm import ( ArkDPAVMAddPolicy, ArkDPAVMAuthorizationRule, ArkDPAVMAWSProviderData, ArkDPAVMAzureProviderData, ArkDPAVMConnectionDataType, ArkDPAVMConnectionInformation, ArkDPAVMFQDNOperator, ArkDPAVMFQDNRule, ArkDPAVMFQDNRulesConjunction, ArkDPAVMGCPProviderData, ArkDPAVMLocalEphemeralUserConnectionMethodData, ArkDPAVMOnPremProviderData, ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMProvider, ArkDPAVMRDPLocalEphemeralUserConnectionData, ArkDPAVMUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.vm.ark_dpa_vm_policies_service import ArkDPAVMPoliciesService import inquirer
14,223
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData(
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData(
fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR,
18
2023-11-13 09:24:31+00:00
16k
mohenghui/detectAuto_v8
ultralytics/models/sam/model.py
[ { "identifier": "Model", "path": "ultralytics/engine/model.py", "snippet": "class Model(nn.Module):\n \"\"\"\n A base class to unify APIs for all models.\n\n Args:\n model (str, Path): Path to the model file to load or create.\n task (Any, optional): Task type for the YOLO model. ...
from pathlib import Path from ultralytics.engine.model import Model from ultralytics.utils.torch_utils import model_info from .build import build_sam from .predict import Predictor
10,990
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """
self.model = build_sam(weights)
2
2023-11-16 12:49:59+00:00
16k
Aues6uen11Z/Zafkiel
tests/test.py
[ { "identifier": "logger", "path": "zafkiel/logger.py", "snippet": "" }, { "identifier": "Config", "path": "zafkiel/config.py", "snippet": "class Config:\n ST = Settings\n ST.CVSTRATEGY = [\"mstpl\", \"sift\"]\n ST.THRESHOLD = 0.8\n\n GAME_PATH = None\n SERVER_LANG = 'cn'\n...
from zafkiel import API, Template, logger, Timer, simple_report, Config from zafkiel.ocr import Keyword, Ocr, Digit, DigitCounter, Duration, OcrResultButton from zafkiel.ui import Page, Switch, UI
12,428
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch UI
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch UI
API
2
2023-11-12 09:33:35+00:00
16k
doodledood/chat-flock
examples/chatgpt_clone_with_additional_tools.py
[ { "identifier": "InMemoryChatDataBackingStore", "path": "chatflock/backing_stores/in_memory.py", "snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __in...
import typer from dotenv import load_dotenv from halo import Halo from langchain.text_splitter import TokenTextSplitter from chatflock.backing_stores import InMemoryChatDataBackingStore from chatflock.base import Chat from chatflock.code import LocalCodeExecutor from chatflock.code.langchain import CodeExecutionTool from chatflock.conductors.round_robin import RoundRobinChatConductor from chatflock.participants.langchain import LangChainBasedAIChatParticipant from chatflock.participants.user import UserChatParticipant from chatflock.renderers.terminal import TerminalChatRenderer from chatflock.web_research import WebSearch from chatflock.web_research.page_analyzer import OpenAIChatPageQueryAnalyzer from chatflock.web_research.page_retrievers.selenium_retriever import SeleniumPageRetriever from chatflock.web_research.search import GoogleSerperSearchResultsProvider from chatflock.web_research.web_research import WebResearchTool from examples.common import create_chat_model, get_max_context_size
11,146
def chatgpt_clone_with_additional_tools( model: str = "gpt-4-1106-preview", model_for_page_analysis: str = "gpt-3.5-turbo-1106", temperature: float = 0.0, temperature_for_page_analysis: float = 0.0, ) -> None: chat_model = create_chat_model(model=model, temperature=temperature) chat_model_for_page_analysis = create_chat_model( model=model_for_page_analysis, temperature=temperature_for_page_analysis ) max_context_size_for_page_analysis = get_max_context_size(chat_model_for_page_analysis) or 12_000
def chatgpt_clone_with_additional_tools( model: str = "gpt-4-1106-preview", model_for_page_analysis: str = "gpt-3.5-turbo-1106", temperature: float = 0.0, temperature_for_page_analysis: float = 0.0, ) -> None: chat_model = create_chat_model(model=model, temperature=temperature) chat_model_for_page_analysis = create_chat_model( model=model_for_page_analysis, temperature=temperature_for_page_analysis ) max_context_size_for_page_analysis = get_max_context_size(chat_model_for_page_analysis) or 12_000
page_retriever = SeleniumPageRetriever()
10
2023-11-12 11:10:58+00:00
16k
atlantic-quantum/Shipyard
shipyard/passes/semantic_analysis/semantic_analyzer.py
[ { "identifier": "ErrorCode", "path": "shipyard/compiler_error.py", "snippet": "class ErrorCode(Enum):\n \"\"\"Class to enumerate error codes of the shipyard\"\"\"\n\n ID_NOT_FOUND = \"Identifier not found\"\n DUPLICATE_ID = \"Duplicate id found\"\n NOT_IN_GLOBAL_SCOPE = \"Not in global scope...
from contextlib import contextmanager from openpulse import ast from ...compiler_error import ErrorCode, SemanticError from ...logger import LOGGER from ...mangle import Mangler from ...utilities import ScopeContext from ...visitors import GenericVisitor, LiteralVisitor, TypeVisitor from .scoped_symbol_table import CalScopedSymbolTable, ScopedSymbolTable from .symbols import ( AliasSymbol, ClassicalSymbol, ConstantSymbol, DefcalSymbol, ExternSymbol, GateSymbol, IOSymbol, LiteralSymbol, QuantumSymbol, SubroutineSymbol, Symbol, )
12,021
""" Module that host the SemanticAnalyser QASMVisitor class that can be used to perform semantic analysis on openQASM Abstract Syntax Trees. """ # pylint: disable=R0904: # Too many public methods class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor): """ QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree usage: qasm_ast = openpulse.parse(qasm_program_string) sa = SemanticAnalyser() sa.visit(qasm_ast) """ def __init__(self) -> None: self.current_scope: ScopedSymbolTable = None self._calibration_scope: CalScopedSymbolTable = None self._scope_context: ScopeContext = None super().__init__() @property def calibration_scope(self) -> CalScopedSymbolTable: """Getter for the 'calibration_scope' symbol table of a SemanticAnalyser instance. Creates and returns an initialised calibration scope on first call. Subsequent calls return the same scope. Returns: CalScopedSymbolTable: a scoped symbol table used for symbols declared within openpulse syntax (cal & defcal) """ if self._calibration_scope is None: self.ensure_in_global_scope(ast.Identifier("init cal scope")) self._calibration_scope = CalScopedSymbolTable( "cal_scope", enclosing_scope=self.current_scope, init_cal=True ) return self._calibration_scope @property def scope_context(self) -> ScopeContext: """Getter for the 'scope_context' property of a SemanticAnalyser instance""" return self._scope_context @scope_context.setter def scope_context(self, value: ScopeContext): LOGGER.debug("SET SCOPE CONTEXT: %s", value) self._scope_context = value # pylint: disable=C0103 # disable snake_case naming style # these functions are of the form "visit_{QASMNode class name}" def visit_Program(self, node: ast.Program) -> None: """ Program node visitor, creates and enters a global symbol table (global scope), visits all other statements in the openQASM program. 
Args: node (ast.Program): openQASM program ast node to visit """ global_scope = ScopedSymbolTable( scope_name="global", enclosing_scope=self.current_scope, ) with self.scope_context_manager(global_scope, ScopeContext.GLOBAL): for statement in node.statements: self.visit(statement) def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None: """ ExternDeclaration node visitor, inserts a symbol representing the external function declaration into current_scope (symbol table) Args: node (ast.ExternDeclaration): openQASM external function declaration ast node to visit """ extern_name = node.name.name params = [ ClassicalSymbol( name=f"{extern_name}_arg_{i}", kind=self.visit(argument.type) ) for i, argument in enumerate(node.arguments) ] return_type = self.visit(node.return_type) if node.return_type else None
""" Module that host the SemanticAnalyser QASMVisitor class that can be used to perform semantic analysis on openQASM Abstract Syntax Trees. """ # pylint: disable=R0904: # Too many public methods class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor): """ QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree usage: qasm_ast = openpulse.parse(qasm_program_string) sa = SemanticAnalyser() sa.visit(qasm_ast) """ def __init__(self) -> None: self.current_scope: ScopedSymbolTable = None self._calibration_scope: CalScopedSymbolTable = None self._scope_context: ScopeContext = None super().__init__() @property def calibration_scope(self) -> CalScopedSymbolTable: """Getter for the 'calibration_scope' symbol table of a SemanticAnalyser instance. Creates and returns an initialised calibration scope on first call. Subsequent calls return the same scope. Returns: CalScopedSymbolTable: a scoped symbol table used for symbols declared within openpulse syntax (cal & defcal) """ if self._calibration_scope is None: self.ensure_in_global_scope(ast.Identifier("init cal scope")) self._calibration_scope = CalScopedSymbolTable( "cal_scope", enclosing_scope=self.current_scope, init_cal=True ) return self._calibration_scope @property def scope_context(self) -> ScopeContext: """Getter for the 'scope_context' property of a SemanticAnalyser instance""" return self._scope_context @scope_context.setter def scope_context(self, value: ScopeContext): LOGGER.debug("SET SCOPE CONTEXT: %s", value) self._scope_context = value # pylint: disable=C0103 # disable snake_case naming style # these functions are of the form "visit_{QASMNode class name}" def visit_Program(self, node: ast.Program) -> None: """ Program node visitor, creates and enters a global symbol table (global scope), visits all other statements in the openQASM program. 
Args: node (ast.Program): openQASM program ast node to visit """ global_scope = ScopedSymbolTable( scope_name="global", enclosing_scope=self.current_scope, ) with self.scope_context_manager(global_scope, ScopeContext.GLOBAL): for statement in node.statements: self.visit(statement) def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None: """ ExternDeclaration node visitor, inserts a symbol representing the external function declaration into current_scope (symbol table) Args: node (ast.ExternDeclaration): openQASM external function declaration ast node to visit """ extern_name = node.name.name params = [ ClassicalSymbol( name=f"{extern_name}_arg_{i}", kind=self.visit(argument.type) ) for i, argument in enumerate(node.arguments) ] return_type = self.visit(node.return_type) if node.return_type else None
extern_symbol = ExternSymbol(
14
2023-11-16 17:37:29+00:00
16k
quantuminterface/qiclib
src/qiclib/code/qi_sequencer.py
[ { "identifier": "QiCellProperty", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. Instead a QiCellProperty object will be generated.\n This object can be used as leng...
from enum import Enum from typing import List, Union, Any, Dict, Optional, Tuple from qiclib.code.qi_jobs import ( ForRange, If, Parallel, cQiRecording, cQiSync, ) from .qi_var_definitions import ( QiCellProperty, QiVariableSet, _QiCalcBase, _QiVariableBase, QiExpression, _QiConstValue, QiCondition, QiOpCond, QiOp, ) from .qi_seq_instructions import ( SeqLoad, SeqStore, SeqAwaitQubitState, SequencerInstruction, SeqRegImmediateInst, SeqRegRegInst, SeqLoadUpperImm, SeqJump, SeqBranch, SeqWaitImm, SeqWaitRegister, SeqTrigger, SeqEnd, SeqTriggerWaitRegister, ) from .qi_util import _get_for_range_iterations from .qi_var_definitions import _QiVariableBase from .qi_var_definitions import _QiCalcBase from .qi_var_definitions import _QiVariableBase from .qi_jobs import _cQiPlay_base import warnings import qiclib.packages.utility as util
10,864
"""Returns register to stack; Raises exception when register is already in stack, or addressing is faulty. Releasing register 0 does nothing""" if reg in self._register_stack: raise IndexError("Release Register: Already released register") if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0): raise IndexError("Release Register: Address out of Range") if reg == self.reg0: return reg.valid = True # if register was invalidated and is released again, return it to initial valid state self._register_stack.append(reg) def add_instruction_to_list( self, instruction: SequencerInstruction, length_in_cycles: int = 1, length_valid=True, ): """Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list""" if self._trigger_mods.is_pulse_active: self.trigger_choke_pulse() if length_in_cycles == 0: length_in_cycles = 1 # length is always at least 1 per instruction self.instruction_list.append(instruction) self._prog_cycles.add( length_in_cycles, length_valid ) # Will be deprecated when external sync is possible. def get_prog_size(self) -> int: return len(self.instruction_list) def add_mov_command(self, dst_reg: _Register, src_reg: _Register): """Copies value of src_reg to dst_reg.""" self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg) def get_upper_immediate_value(self, value: SequencerInstruction.imm_type): """If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits and subtract from upper 20 bits.""" sign_extended_lower = ( value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF ) return (value - sign_extended_lower) & 0xFFFFF000 def immediate_to_register( self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None ) -> _Register: """Loads immediate to dst_reg. If dst_reg is not defined a new register is used to save val to. If value == 0 and no register is specified, reg0 is returned, which always contains 0. 
dst_reg.value is updated to reflect changes.""" if val == 0 and dst_reg is None: return self.reg0 elif dst_reg is None: dst_reg = self.request_register() if isinstance(val, float): raise NotImplementedError("float not implemented yet") if SequencerInstruction.is_value_in_lower_immediate(val): self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, 0, val) ) # register_0 always contains 0 else: upper_immediate = self.get_upper_immediate_value(val) self.add_instruction_to_list(SeqLoadUpperImm(dst_reg.adr, upper_immediate)) self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, dst_reg.adr, val) ) dst_reg.update_register_value(val, QiOp.PLUS, 0) return dst_reg def add_calculation( self, val1: Union[_Register, int, float], operator: QiOp, val2: Union[_Register, int, float], dst_reg: Optional[_Register] = None, ) -> _Register: """Adds calculation command to sequencer. Depending on the values and the operation different commands are added. dst_reg.value is updated to reflect changes.""" if (not isinstance(val1, _Register)) and (not isinstance(val2, _Register)): raise RuntimeError("QiCalc should not contain two int/float") if dst_reg is None: dst_reg = self.request_register() self.alu.calculate(dst_reg, val1, operator, val2) dst_reg.update_register_value(val1, operator, val2) return dst_reg def add_condition( self, reg1: _Register, operator: QiOpCond, reg2: _Register, jmp_val=0 ): """Adds condition command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqBranch(operator, reg1.adr, reg2.adr, jmp_val) self.add_instruction_to_list(cmd) return cmd def add_jump(self, jmp_val=0) -> SeqJump: """Adds jump command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqJump(jmp_val) self.add_instruction_to_list( cmd, length_in_cycles=Sequencer.JUMP_EXECUTION_CYCLES ) return cmd def __evaluate_qicalc_val(self, value: QiExpression) -> Union[_Register, int]: 
"""Return value of QiCalc-Value. If another QiCalc node is found, evaluate node first, then return target register of evaluated node. Return _Register if QiVariable is found. Else return constant register value as int. (Can represent cycles)""" if isinstance(value, _QiCalcBase): return self.add_qi_calc(value) elif isinstance(value, _QiVariableBase): return self.get_var_register(value)
# Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ The lower level logic of the code generation. This module tracks the sequencer state at the current point (e.g. register values, variable to register mapping, etc.), provides helper functions to generate code for expressions and more. """ class _Register: """Class of Sequencer representing registers. Keeps track of values in register. Values are used for program length. Program length is invalidated by use of If/Else. 
TODO load commands invalidate value""" def __init__(self, address) -> None: self.adr = address self.value = None self.valid = True def addition(self, val1, val2): self.value = val1 + val2 def subtraction(self, val1, val2): self.value = val1 - val2 def multiplication(self, val1, val2): self.value = val1 * val2 def and_values(self, val1, val2): self.value = val1 & val2 def or_values(self, val1, val2): self.value = val1 | val2 def xor_values(self, val1, val2): self.value = val1 ^ val2 def lshift(self, val1, val2): self.value = val1 << val2 def rshift(self, val1, val2): self.value = val1 >> val2 def inversion(self, val1, val2): self.value = ~val1 # Dictionary used to receive function from input QiOp eval_operation = { QiOp.PLUS: addition, QiOp.MINUS: subtraction, QiOp.MULT: multiplication, QiOp.AND: and_values, QiOp.OR: or_values, QiOp.XOR: xor_values, QiOp.LSH: lshift, QiOp.RSH: rshift, QiOp.NOT: inversion, } def get_value(self): if self.valid: return self.value return None def update_register_value(self, val1, op, val2): """Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used. When a calculation is done using a invalid variable value, the ensuing value is also invalidated. 
""" if self.adr == 0: self.value = 0 # reg0 always contains 0 return if isinstance(val1, _Register): if val1.value is None: raise RuntimeError( f"Variable at Register {val1.adr} has not been properly initialised" ) if not val1.valid: self.valid = False val1 = val1.value if isinstance(val2, _Register): if val2.value is None: raise RuntimeError( f"Variable at Register {val2.adr} has not been properly initialised" ) if not val2.valid: self.valid = False val2 = val2.value self.eval_operation[op](self, val1, val2) class ForRangeEntry: def __init__(self, reg_addr, start_val, end_val, step_val) -> None: self.reg_addr = reg_addr self.start = start_val self.end = end_val self.step = step_val self.end_addr = 0 self.iterations = 0 self.aggregate_iterations = 0 self.contained_entries: List[ForRangeEntry] = [] def _calc_aggregate(self): """Calculates the number of loops contained inside, considering nested entries, for later use at progress bar.""" self.iterations = _get_for_range_iterations(self.start, self.end, self.step) if len(self.contained_entries) == 0 or self.iterations is None: if self.iterations is None: self.aggregate_iterations = 0 warnings.warn( "A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate." ) else: self.aggregate_iterations = self.iterations else: nested = 0 for entry in self.contained_entries: if entry.aggregate_iterations == 0: warnings.warn( "A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate." 
) continue nested += entry.aggregate_iterations self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1) def get_iteration(self, value: int) -> int: """Returns the current iteration depending on the parameter value""" if isinstance(self.start, _QiVariableBase): return 0 _step = self.step if isinstance(self.step, int) else self.step.value iterations = 0 for _ in range(self.start, value, _step): iterations += 1 return iterations @staticmethod def get_total_loops(entry_list): if len(entry_list) == 0: return 1 iterations = 0 for entry in entry_list: iterations += entry.aggregate_iterations return iterations if iterations > 0 else 1 @staticmethod def calculate_current_loop(entry_list, register_list, prog_counter): loop = 0 for entry in entry_list: if entry.end_addr < prog_counter: loop += entry.aggregate_iterations else: iteration = entry.get_iteration(register_list[entry.reg_addr]) if len(entry.contained_entries) == 0: loop += iteration else: loop += iteration * ForRangeEntry.get_total_loops( entry.contained_entries ) + ForRangeEntry.calculate_current_loop( entry.contained_entries, register_list, prog_counter ) return loop return loop class Sequencer: AVAILABLE_REGISTERS = 31 MULTIPLICATION_LENGTH = 6 JUMP_EXECUTION_CYCLES = 2 LOAD_STORE_LENGTH = 8 # Additional delay to prevent ignored trigger for consecutive readouts RECORDING_MODULE_DELAY_CYCLES = 1 CHOKE_PULSE_INDEX = 14 def __init__(self, cell_index=None): self.alu = _ALU(self) self.reset() self.cell_index = cell_index def reset(self): self._register_stack: List[_Register] = [] self.instruction_list: List[SequencerInstruction] = [] self._prog_cycles = _ProgramCycles() self._var_reg_dict: Dict[Any, _Register] = {} self._trigger_mods = _TriggerModules() self._for_range_list = [] self._for_range_stack: List[ForRangeEntry] = [] # register 0 always contains 0, so is not in stack self.reg0 = _Register(0) for x in range(Sequencer.AVAILABLE_REGISTERS, 0, -1): self._register_stack.append(_Register(x)) 
def print_assembler(self): pc = 0 for instruction in self.instruction_list: print(str(pc) + "# ", end="") print(instruction) pc += 1 @property def prog_cycles(self): """Program length is used for implicit synchs with Wait-Commands. If a program contains variable If/Else or loads to wait registers prog_length can not be determined. Invalid prog_cycles are some value less than 0. """ if self._prog_cycles.valid: return self._prog_cycles.cycles return _ProgramCycles.INVALID @prog_cycles.setter def prog_cycles(self, x): """Set externally when ForRange is used.""" self._prog_cycles.cycles = x @property def recording_delay(self): return util.conv_cycles_to_time(self.RECORDING_MODULE_DELAY_CYCLES) @property def readout_active(self): return self._trigger_mods.is_readout_active @property def manipulation_active(self): return self._trigger_mods.is_manipulation_active def add_variable(self, var): """Adds variable to sequencer, reserving a register for it""" reg = self.request_register() self._var_reg_dict[var.id] = reg # Named variables can be initialized externally if var.name is not None: reg.valid = False reg.value = 0 def release_variable(self, var): self.release_register(self.get_var_register(var)) def get_var_register(self, var) -> _Register: """Returns _Register of QiVariable var""" reg = self._var_reg_dict.get(var.id) if reg is None: raise RuntimeError( f"Variable not defined for Sequencer, var.id:{var.id}, {self._var_reg_dict}" ) return reg def get_var_value(self, var) -> Union[int, float, None]: return self.get_var_register(var).get_value() def request_register(self) -> _Register: """Returns register from stack, raises exception, if no registers are on stack anymore""" try: return self._register_stack.pop() except IndexError as e: print( "Not enough registers available, sequencer " + str(self) + " error: " + str(e) ) raise def get_cycles_from_length(self, length) -> Union[_Register, int]: """If length is QiVariable, return _Register, else return numbers of cycles 
ceiled""" if isinstance(length, _QiVariableBase): return self.get_var_register(length) elif isinstance(length, int): length = float(length) return util.conv_time_to_cycles(length, "ceil") def release_register(self, reg: _Register): """Returns register to stack; Raises exception when register is already in stack, or addressing is faulty. Releasing register 0 does nothing""" if reg in self._register_stack: raise IndexError("Release Register: Already released register") if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0): raise IndexError("Release Register: Address out of Range") if reg == self.reg0: return reg.valid = True # if register was invalidated and is released again, return it to initial valid state self._register_stack.append(reg) def add_instruction_to_list( self, instruction: SequencerInstruction, length_in_cycles: int = 1, length_valid=True, ): """Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list""" if self._trigger_mods.is_pulse_active: self.trigger_choke_pulse() if length_in_cycles == 0: length_in_cycles = 1 # length is always at least 1 per instruction self.instruction_list.append(instruction) self._prog_cycles.add( length_in_cycles, length_valid ) # Will be deprecated when external sync is possible. def get_prog_size(self) -> int: return len(self.instruction_list) def add_mov_command(self, dst_reg: _Register, src_reg: _Register): """Copies value of src_reg to dst_reg.""" self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg) def get_upper_immediate_value(self, value: SequencerInstruction.imm_type): """If bit 11 of lower value is 1, ADDI command sign extends the value. 
To account for that, sign extend lower 12 bits and subtract from upper 20 bits.""" sign_extended_lower = ( value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF ) return (value - sign_extended_lower) & 0xFFFFF000 def immediate_to_register( self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None ) -> _Register: """Loads immediate to dst_reg. If dst_reg is not defined a new register is used to save val to. If value == 0 and no register is specified, reg0 is returned, which always contains 0. dst_reg.value is updated to reflect changes.""" if val == 0 and dst_reg is None: return self.reg0 elif dst_reg is None: dst_reg = self.request_register() if isinstance(val, float): raise NotImplementedError("float not implemented yet") if SequencerInstruction.is_value_in_lower_immediate(val): self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, 0, val) ) # register_0 always contains 0 else: upper_immediate = self.get_upper_immediate_value(val) self.add_instruction_to_list(SeqLoadUpperImm(dst_reg.adr, upper_immediate)) self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, dst_reg.adr, val) ) dst_reg.update_register_value(val, QiOp.PLUS, 0) return dst_reg def add_calculation( self, val1: Union[_Register, int, float], operator: QiOp, val2: Union[_Register, int, float], dst_reg: Optional[_Register] = None, ) -> _Register: """Adds calculation command to sequencer. Depending on the values and the operation different commands are added. 
dst_reg.value is updated to reflect changes.""" if (not isinstance(val1, _Register)) and (not isinstance(val2, _Register)): raise RuntimeError("QiCalc should not contain two int/float") if dst_reg is None: dst_reg = self.request_register() self.alu.calculate(dst_reg, val1, operator, val2) dst_reg.update_register_value(val1, operator, val2) return dst_reg def add_condition( self, reg1: _Register, operator: QiOpCond, reg2: _Register, jmp_val=0 ): """Adds condition command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqBranch(operator, reg1.adr, reg2.adr, jmp_val) self.add_instruction_to_list(cmd) return cmd def add_jump(self, jmp_val=0) -> SeqJump: """Adds jump command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqJump(jmp_val) self.add_instruction_to_list( cmd, length_in_cycles=Sequencer.JUMP_EXECUTION_CYCLES ) return cmd def __evaluate_qicalc_val(self, value: QiExpression) -> Union[_Register, int]: """Return value of QiCalc-Value. If another QiCalc node is found, evaluate node first, then return target register of evaluated node. Return _Register if QiVariable is found. Else return constant register value as int. (Can represent cycles)""" if isinstance(value, _QiCalcBase): return self.add_qi_calc(value) elif isinstance(value, _QiVariableBase): return self.get_var_register(value)
elif isinstance(value, _QiConstValue):
5
2023-11-10 10:26:10+00:00
16k
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependen...
import logging from typing import Annotated, Any, Optional from fastapi import ( APIRouter, Body, Depends, Header, HTTPException, Path, Request, status, ) from fastapi.security import OAuth2PasswordRequestForm from pydantic import EmailStr from redis.asyncio import Redis from starlette.datastructures import Address from app.api.deps import get_redis_dep from app.api.oauth2_validation import get_current_user, get_refresh_current_user from app.config.config import ( get_auth_settings, get_init_settings, get_settings, init_setting, ) from app.config.db.auth_settings import AuthSettings from app.config.init_settings import InitSettings from app.config.settings import Settings from app.core.security.password import verify_password from app.exceptions.exceptions import NotFoundException, ServiceException from app.models.sql.user import User as UserDB from app.schemas.external.msg import Msg from app.schemas.external.token import TokenResetPassword, TokenResponse from app.schemas.external.user import ( UserResponse, UserUpdate, UserUpdateResponse, ) from app.schemas.infrastructure.user import UserAuth from app.services.infrastructure.auth import common_auth_procedure from app.services.infrastructure.token import TokenService from app.services.infrastructure.user import UserService, get_user_service from app.tasks.email_tasks.email_tasks import ( send_password_changed_confirmation_email, send_reset_password_email, ) from app.utils.security.password import ( generate_password_reset_token, verify_password_reset_token, )
13,839
request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: 
Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await 
common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. 
## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user
@router.post("/recover-password/{email}", response_model=Msg)
11
2023-11-17 00:32:32+00:00
16k
vitant-lang/CBAM-ASPP
train.py
[ { "identifier": "DeepLab", "path": "nets/deeplabv3_plus.py", "snippet": "class DeepLab(nn.Module):\n\tdef __init__(self, num_classes, backbone=\"mobilenet\", pretrained=True, downsample_factor=16):\n\t\tsuper(DeepLab, self).__init__()\n\t\tif backbone==\"xception\":\n\t\t\t#-----------------------------...
import os import datetime import numpy as np import torch import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim as optim from torch.utils.data import DataLoader from nets.deeplabv3_plus import DeepLab from nets.deeplabv3_training import (get_lr_scheduler, set_optimizer_lr, weights_init) from utils.callbacks import LossHistory, EvalCallback from utils.dataloader import DeeplabDataset, deeplab_dataset_collate from utils.utils import download_weights, show_config from utils.utils_fit import fit_one_epoch from torch.cuda.amp import GradScaler as GradScaler
12,464
#------------------------------------------------------------------# cls_weights = np.ones([num_classes], np.float32) #------------------------------------------------------------------# # num_workers 用于设置是否使用多线程读取数据,1代表关闭多线程 # 开启后会加快数据读取速度,但是会占用更多内存 # keras里开启多线程有些时候速度反而慢了许多 # 在IO为瓶颈的时候再开启多线程,即GPU运算速度远大于读取图片的速度。 #------------------------------------------------------------------# num_workers = 4 #------------------------------------------------------# # 设置用到的显卡 #------------------------------------------------------# ngpus_per_node = torch.cuda.device_count() if distributed: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) device = torch.device("cuda", local_rank) if local_rank == 0: print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...") print("Gpu Device Count : ", ngpus_per_node) else: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') local_rank = 0 #----------------------------------------------------# # 下载预训练权重 #----------------------------------------------------# if pretrained: if distributed: if local_rank == 0: download_weights(backbone) dist.barrier() else: download_weights(backbone) model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained) if not pretrained: weights_init(model) if model_path != '': #------------------------------------------------------# # 权值文件请看README,百度网盘下载 #------------------------------------------------------# if local_rank == 0: print('Load weights {}.'.format(model_path)) #------------------------------------------------------# # 根据预训练权重的Key和模型的Key进行加载 #------------------------------------------------------# model_dict = model.state_dict() pretrained_dict = torch.load(model_path, map_location = device) load_key, no_load_key, temp_dict = [], [], {} for k, v in pretrained_dict.items(): if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v): 
temp_dict[k] = v load_key.append(k) else: no_load_key.append(k) model_dict.update(temp_dict) model.load_state_dict(model_dict) #------------------------------------------------------# # 显示没有匹配上的Key #------------------------------------------------------# if local_rank == 0: print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key)) print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key)) print("\n\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\033[0m") #----------------------# # 记录Loss #----------------------# if local_rank == 0: time_str = datetime.datetime.strftime(datetime.datetime.now(),'%Y_%m_%d_%H_%M_%S') log_dir = os.path.join(save_dir, "loss_" + str(time_str)) loss_history = LossHistory(log_dir, model, input_shape=input_shape) else: loss_history = None #------------------------------------------------------------------# # torch 1.2不支持amp,建议使用torch 1.7.1及以上正确使用fp16 # 因此torch1.2这里显示"could not be resolve" #------------------------------------------------------------------# if fp16: scaler = GradScaler() else: scaler = None model_train = model.train() #----------------------------# # 多卡同步Bn #----------------------------# if sync_bn and ngpus_per_node > 1 and distributed: model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train) elif sync_bn: print("Sync_bn is not support in one gpu or not distributed.") if Cuda: if distributed: #----------------------------# # 多卡平行运行 #----------------------------# model_train = model_train.cuda(local_rank) model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True) else: model_train = torch.nn.DataParallel(model) cudnn.benchmark = True model_train = model_train.cuda() #---------------------------# # 读取数据集对应的txt #---------------------------# with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f: train_lines = f.readlines() 
with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f: val_lines = f.readlines() num_train = len(train_lines) num_val = len(val_lines) if local_rank == 0:
''' 训练自己的语义分割模型一定需要注意以下几点: 1、训练前仔细检查自己的格式是否满足要求,该库要求数据集格式为VOC格式,需要准备好的内容有输入图片和标签 输入图片为.jpg图片,无需固定大小,传入训练前会自动进行resize。 灰度图会自动转成RGB图片进行训练,无需自己修改。 输入图片如果后缀非jpg,需要自己批量转成jpg后再开始训练。 标签为png图片,无需固定大小,传入训练前会自动进行resize。 由于许多同学的数据集是网络上下载的,标签格式并不符合,需要再度处理。一定要注意!标签的每个像素点的值就是这个像素点所属的种类。 网上常见的数据集总共对输入图片分两类,背景的像素点值为0,目标的像素点值为255。这样的数据集可以正常运行但是预测是没有效果的! 需要改成,背景的像素点值为0,目标的像素点值为1。 如果格式有误,参考:https://github.com/bubbliiiing/segmentation-format-fix 2、损失值的大小用于判断是否收敛,比较重要的是有收敛的趋势,即验证集损失不断下降,如果验证集损失基本上不改变的话,模型基本上就收敛了。 损失值的具体大小并没有什么意义,大和小只在于损失的计算方式,并不是接近于0才好。如果想要让损失好看点,可以直接到对应的损失函数里面除上10000。 训练过程中的损失值会保存在logs文件夹下的loss_%Y_%m_%d_%H_%M_%S文件夹中 3、训练好的权值文件保存在logs文件夹中,每个训练世代(Epoch)包含若干训练步长(Step),每个训练步长(Step)进行一次梯度下降。 如果只是训练了几个Step是不会保存的,Epoch和Step的概念要捋清楚一下。 ''' if __name__ == "__main__": #---------------------------------# # Cuda 是否使用Cuda # 没有GPU可以设置成False #---------------------------------# Cuda = True #---------------------------------------------------------------------# # distributed 用于指定是否使用单机多卡分布式运行 # 终端指令仅支持Ubuntu。CUDA_VISIBLE_DEVICES用于在Ubuntu下指定显卡。 # Windows系统下默认使用DP模式调用所有显卡,不支持DDP。 # DP模式: # 设置 distributed = False # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python train.py # DDP模式: # 设置 distributed = True # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py #---------------------------------------------------------------------# distributed = False #---------------------------------------------------------------------# # sync_bn 是否使用sync_bn,DDP模式多卡可用 #---------------------------------------------------------------------# sync_bn = False #---------------------------------------------------------------------# # fp16 是否使用混合精度训练 # 可减少约一半的显存、需要pytorch1.7.1以上 #---------------------------------------------------------------------# fp16 = False #-----------------------------------------------------# # num_classes 训练自己的数据集必须要修改的 # 自己需要的分类个数+1,如2+1 #-----------------------------------------------------# num_classes = 3 #---------------------------------# # 所使用的的主干网络: 
# mobilenet # xception #---------------------------------# backbone = "mobilenet" #----------------------------------------------------------------------------------------------------------------------------# # pretrained 是否使用主干网络的预训练权重,此处使用的是主干的权重,因此是在模型构建的时候进行加载的。 # 如果设置了model_path,则主干的权值无需加载,pretrained的值无意义。 # 如果不设置model_path,pretrained = True,此时仅加载主干开始训练。 # 如果不设置model_path,pretrained = False,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 #----------------------------------------------------------------------------------------------------------------------------# pretrained = False #----------------------------------------------------------------------------------------------------------------------------# # 权值文件的下载请看README,可以通过网盘下载。模型的 预训练权重 对不同数据集是通用的,因为特征是通用的。 # 模型的 预训练权重 比较重要的部分是 主干特征提取网络的权值部分,用于进行特征提取。 # 预训练权重对于99%的情况都必须要用,不用的话主干部分的权值太过随机,特征提取效果不明显,网络训练的结果也不会好 # 训练自己的数据集时提示维度不匹配正常,预测的东西都不一样了自然维度不匹配 # # 如果训练过程中存在中断训练的操作,可以将model_path设置成logs文件夹下的权值文件,将已经训练了一部分的权值再次载入。 # 同时修改下方的 冻结阶段 或者 解冻阶段 的参数,来保证模型epoch的连续性。 # # 当model_path = ''的时候不加载整个模型的权值。 # # 此处使用的是整个模型的权重,因此是在train.py进行加载的,pretrain不影响此处的权值加载。 # 如果想要让模型从主干的预训练权值开始训练,则设置model_path = '',pretrain = True,此时仅加载主干。 # 如果想要让模型从0开始训练,则设置model_path = '',pretrain = Fasle,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 # # 一般来讲,网络从0开始的训练效果会很差,因为权值太过随机,特征提取效果不明显,因此非常、非常、非常不建议大家从0开始训练! 
# 如果一定要从0开始,可以了解imagenet数据集,首先训练分类模型,获得网络的主干部分权值,分类模型的 主干部分 和该模型通用,基于此进行训练。 #----------------------------------------------------------------------------------------------------------------------------# model_path = "model_data/deeplab_mobilenetv2.pth" #---------------------------------------------------------# # downsample_factor 下采样的倍数8、16 # 8下采样的倍数较小、理论上效果更好。 # 但也要求更大的显存 #---------------------------------------------------------# downsample_factor = 8 #------------------------------# # 输入图片的大小 #------------------------------# input_shape = [512, 512] #----------------------------------------------------------------------------------------------------------------------------# # 训练分为两个阶段,分别是冻结阶段和解冻阶段。设置冻结阶段是为了满足机器性能不足的同学的训练需求。 # 冻结训练需要的显存较小,显卡非常差的情况下,可设置Freeze_Epoch等于UnFreeze_Epoch,此时仅仅进行冻结训练。 # # 在此提供若干参数设置建议,各位训练者根据自己的需求进行灵活调整: # (一)从整个模型的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:UnFreeze_Epoch可以在100-300之间调整。 # (二)从主干网络的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 120,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 120,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 
其中:由于从主干网络的预训练权重开始训练,主干的权值不一定适合语义分割,需要更多的训练跳出局部最优解。 # UnFreeze_Epoch可以在120-300之间调整。 # Adam相较于SGD收敛的快一些。因此UnFreeze_Epoch理论上可以小一点,但依然推荐更多的Epoch。 # (三)batch_size的设置: # 在显卡能够接受的范围内,以大为好。显存不足与数据集大小无关,提示显存不足(OOM或者CUDA out of memory)请调小batch_size。 # 受到BatchNorm层影响,batch_size最小为2,不能为1。 # 正常情况下Freeze_batch_size建议为Unfreeze_batch_size的1-2倍。不建议设置的差距过大,因为关系到学习率的自动调整。 #----------------------------------------------------------------------------------------------------------------------------# #------------------------------------------------------------------# # 冻结阶段训练参数 # 此时模型的主干被冻结了,特征提取网络不发生改变 # 占用的显存较小,仅对网络进行微调 # Init_Epoch 模型当前开始的训练世代,其值可以大于Freeze_Epoch,如设置: # Init_Epoch = 60、Freeze_Epoch = 50、UnFreeze_Epoch = 100 # 会跳过冻结阶段,直接从60代开始,并调整对应的学习率。 # (断点续练时使用) # Freeze_Epoch 模型冻结训练的Freeze_Epoch # (当Freeze_Train=False时失效) # Freeze_batch_size 模型冻结训练的batch_size # (当Freeze_Train=False时失效) #------------------------------------------------------------------# Init_Epoch = 0 Freeze_Epoch = 10 Freeze_batch_size = 8 #------------------------------------------------------------------# # 解冻阶段训练参数 # 此时模型的主干不被冻结了,特征提取网络会发生改变 # 占用的显存较大,网络所有的参数都会发生改变 # UnFreeze_Epoch 模型总共训练的epoch # Unfreeze_batch_size 模型在解冻后的batch_size #------------------------------------------------------------------# UnFreeze_Epoch = 20 Unfreeze_batch_size = 4 #------------------------------------------------------------------# # Freeze_Train 是否进行冻结训练 # 默认先冻结主干训练后解冻训练。 #------------------------------------------------------------------# Freeze_Train = True #------------------------------------------------------------------# # 其它训练参数:学习率、优化器、学习率下降有关 #------------------------------------------------------------------# #------------------------------------------------------------------# # Init_lr 模型的最大学习率 # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # Min_lr 模型的最小学习率,默认为最大学习率的0.01 #------------------------------------------------------------------# Init_lr = 7e-4 Min_lr = Init_lr * 0.01 
#------------------------------------------------------------------# # optimizer_type 使用到的优化器种类,可选的有adam、sgd # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # momentum 优化器内部使用到的momentum参数 # weight_decay 权值衰减,可防止过拟合 # adam会导致weight_decay错误,使用adam时建议设置为0。 #------------------------------------------------------------------# optimizer_type = "sgd" momentum = 0.9 weight_decay = 1e-4 #1e-4 sgd是 #------------------------------------------------------------------# # lr_decay_type 使用到的学习率下降方式,可选的有'step'、'cos' #------------------------------------------------------------------# lr_decay_type = 'cos' #------------------------------------------------------------------# # save_period 多少个epoch保存一次权值 #------------------------------------------------------------------# save_period = 800 #------------------------------------------------------------------# # save_dir 权值与日志文件保存的文件夹 #------------------------------------------------------------------# save_dir = 'logs' #------------------------------------------------------------------# # eval_flag 是否在训练时进行评估,评估对象为验证集 # eval_period 代表多少个epoch评估一次,不建议频繁的评估 # 评估需要消耗较多的时间,频繁评估会导致训练非常慢 # 此处获得的mAP会与get_map.py获得的会有所不同,原因有二: # (一)此处获得的mAP为验证集的mAP。 # (二)此处设置评估参数较为保守,目的是加快评估速度。 #------------------------------------------------------------------# eval_flag = True eval_period = 400 #7.13开始跑 #10点40 #------------------------------------------------------------------# # VOCdevkit_path 数据集路径 #------------------------------------------------------------------# VOCdevkit_path = 'VOCdevkit' #------------------------------------------------------------------# # 建议选项: # 种类少(几类)时,设置为True # 种类多(十几类)时,如果batch_size比较大(10以上),那么设置为True # 种类多(十几类)时,如果batch_size比较小(10以下),那么设置为False #------------------------------------------------------------------# dice_loss = False #------------------------------------------------------------------# # 是否使用focal loss来防止正负样本不平衡 #------------------------------------------------------------------# focal_loss = False 
#------------------------------------------------------------------# # 是否给不同种类赋予不同的损失权值,默认是平衡的。 # 设置的话,注意设置成numpy形式的,长度和num_classes一样。 # 如: # num_classes = 3 # cls_weights = np.array([1, 2, 3], np.float32) #------------------------------------------------------------------# cls_weights = np.ones([num_classes], np.float32) #------------------------------------------------------------------# # num_workers 用于设置是否使用多线程读取数据,1代表关闭多线程 # 开启后会加快数据读取速度,但是会占用更多内存 # keras里开启多线程有些时候速度反而慢了许多 # 在IO为瓶颈的时候再开启多线程,即GPU运算速度远大于读取图片的速度。 #------------------------------------------------------------------# num_workers = 4 #------------------------------------------------------# # 设置用到的显卡 #------------------------------------------------------# ngpus_per_node = torch.cuda.device_count() if distributed: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) device = torch.device("cuda", local_rank) if local_rank == 0: print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...") print("Gpu Device Count : ", ngpus_per_node) else: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') local_rank = 0 #----------------------------------------------------# # 下载预训练权重 #----------------------------------------------------# if pretrained: if distributed: if local_rank == 0: download_weights(backbone) dist.barrier() else: download_weights(backbone) model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained) if not pretrained: weights_init(model) if model_path != '': #------------------------------------------------------# # 权值文件请看README,百度网盘下载 #------------------------------------------------------# if local_rank == 0: print('Load weights {}.'.format(model_path)) #------------------------------------------------------# # 根据预训练权重的Key和模型的Key进行加载 #------------------------------------------------------# model_dict = model.state_dict() pretrained_dict = 
torch.load(model_path, map_location = device) load_key, no_load_key, temp_dict = [], [], {} for k, v in pretrained_dict.items(): if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v): temp_dict[k] = v load_key.append(k) else: no_load_key.append(k) model_dict.update(temp_dict) model.load_state_dict(model_dict) #------------------------------------------------------# # 显示没有匹配上的Key #------------------------------------------------------# if local_rank == 0: print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key)) print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key)) print("\n\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\033[0m") #----------------------# # 记录Loss #----------------------# if local_rank == 0: time_str = datetime.datetime.strftime(datetime.datetime.now(),'%Y_%m_%d_%H_%M_%S') log_dir = os.path.join(save_dir, "loss_" + str(time_str)) loss_history = LossHistory(log_dir, model, input_shape=input_shape) else: loss_history = None #------------------------------------------------------------------# # torch 1.2不支持amp,建议使用torch 1.7.1及以上正确使用fp16 # 因此torch1.2这里显示"could not be resolve" #------------------------------------------------------------------# if fp16: scaler = GradScaler() else: scaler = None model_train = model.train() #----------------------------# # 多卡同步Bn #----------------------------# if sync_bn and ngpus_per_node > 1 and distributed: model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train) elif sync_bn: print("Sync_bn is not support in one gpu or not distributed.") if Cuda: if distributed: #----------------------------# # 多卡平行运行 #----------------------------# model_train = model_train.cuda(local_rank) model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True) else: model_train = torch.nn.DataParallel(model) cudnn.benchmark = True model_train = model_train.cuda() 
#---------------------------# # 读取数据集对应的txt #---------------------------# with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f: train_lines = f.readlines() with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f: val_lines = f.readlines() num_train = len(train_lines) num_val = len(val_lines) if local_rank == 0:
show_config(
9
2023-11-17 13:25:28+00:00
16k
fg320/DEASC
examples/12B_5x1_farm_dyn_tuning_wso_grouping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p...
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import GPWrap from deasc import TuningDyn_Grouping from deasc.utils_floris import ( floris_extract_object_dict, floris_param_change_object_dict, floris_param_change_object )
11,439
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3)
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3)
wf_model = floris_param_change_object(wf_model, wf_model_dict)
6
2023-11-10 18:13:27+00:00
16k
CPES-Power-and-Energy-Systems/interoperable-recommender-tso
energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py
[ { "identifier": "GaussianProcess", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n ...
import numpy as np from .helpers import GaussianProcess from scipy.optimize import minimize from .helpers import UtilityFunction, unique_rows, PrintLog
11,558
if self.verbose: self.plog.print_step(x, y_init[-1]) # Append any other points passed by the self.initialize method (these # also have a corresponding target value passed by the user). self.init_points += self.x_init # Append the target value of self.initialize method. y_init += self.y_init # Turn it into np array and store. self.X = np.asarray(self.init_points) self.Y = np.asarray(y_init) # Updates the flag self.initialized = True def explore(self, points_dict): """ Method to explore user defined points :param points_dict: :return: """ # Consistency check param_tup_lens = [] for key in self.keys: param_tup_lens.append(len(list(points_dict[key]))) if all([e == param_tup_lens[0] for e in param_tup_lens]): pass else: raise ValueError('The same number of initialization points ' 'must be entered for every parameter.') # Turn into list of lists all_points = [] for key in self.keys: all_points.append(points_dict[key]) # Take transpose of list self.init_points = list(map(list, zip(*all_points))) def initialize(self, points_dict): """ Method to introduce point for which the target function value is known :param points_dict: :return: """ for target in points_dict: self.y_init.append(target) all_points = [] for key in self.keys: all_points.append(points_dict[target][key]) self.x_init.append(all_points) def set_bounds(self, new_bounds): """ A method that allows changing the lower and upper searching bounds :param new_bounds: A dictionary with the parameter name and its new bounds """ # Update the internal object stored dict self.pbounds.update(new_bounds) # Loop through the all bounds and reset the min-max bound matrix for row, key in enumerate(self.pbounds.keys()): # Reset all entries, even if the same. self.bounds[row] = self.pbounds[key] def maximize(self, init_points=5, n_iter=25, acq='ei', kappa=2.576, xi=0.0, **gp_params): """ Main optimization method. 
Parameters ---------- :param init_points: Number of randomly chosen points to sample the target function before fitting the gp. :param n_iter: Total number of times the process is to repeated. Note that currently this methods does not have stopping criteria (due to a number of reasons), therefore the total number of points to be sampled must be specified. :param acq: Acquisition function to be used, defaults to Expected Improvement. :param gp_params: Parameters to be passed to the Scikit-learn Gaussian Process object Returns ------- :return: Nothing """ # Reset timer self.plog.reset_timer() # Set acquisition function
""" BAYESIAN OPTIMIZATION MODULE - Version 0.1.0 Created by Fernando Nogueira (fmfn). Available in - https://github.com/fmfn/BayesianOptimization """ __author__ = 'fmfn' def acq_max(ac, gp, y_max, bounds): """ A function to find the maximum of the acquisition function using the 'L-BFGS-B' method. Parameters ---------- :param ac: The acquisition function object that return its point-wise value. :param gp: A gaussian process fitted to the relevant data. :param y_max: The current maximum known value of the target function. :param bounds: The variables bounds to limit the search of the acq max. Returns ------- :return: x_max, The arg max of the acquisition function. """ # Start with the lower bound as the argmax x_max = bounds[:, 0] max_acq = None x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(100, bounds.shape[0])) for x_try in x_tries: # Find the minimum of minus the acquisition function res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max), x_try.reshape(1, -1), bounds=bounds, method="L-BFGS-B") # Store it if better than previous minimum(maximum). if max_acq is None or -res.fun >= max_acq: x_max = res.x max_acq = -res.fun # Clip output to make sure it lies within the bounds. Due to floating # point technicalities this is not always the case. return np.clip(x_max, bounds[:, 0], bounds[:, 1]) def matern52(theta, d): """ Matern 5/2 correlation model.:: theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation modle. 
""" theta = np.asarray(theta, dtype=np.float) d = np.asarray(d, dtype=np.float) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0] elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1)) return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r) class BayesianOptimization(object): def __init__(self, f, pbounds, verbose=1): """ :param f: Function to be maximized. :param pbounds: Dictionary with parameters names as keys and a tuple with minimum and maximum values. :param verbose: Whether or not to print progress. """ # Store the original dictionary self.pbounds = pbounds # Get the name of the parameters self.keys = list(pbounds.keys()) # Find number of parameters self.dim = len(pbounds) # Create an array with parameters bounds self.bounds = [] for key in self.pbounds.keys(): self.bounds.append(self.pbounds[key]) self.bounds = np.asarray(self.bounds) # Some function to be optimized self.f = f # Initialization flag self.initialized = False # Initialization lists --- stores starting points before process begins self.init_points = [] self.x_init = [] self.y_init = [] # Numpy array place holders self.X = None self.Y = None # Counter of iterations self.i = 0 # Since scipy 0.16 passing lower and upper bound to theta seems to be # broken. However, there is a lot of development going on around GP # is scikit-learn. So I'll pick the easy route here and simple specify # only theta0. 
self.gp = GaussianProcess(corr=matern52, theta0=np.random.uniform(0.001, 0.05, self.dim), thetaL=1e-5 * np.ones(self.dim), thetaU=1e0 * np.ones(self.dim), random_start=30) # Utility Function placeholder self.util = None # PrintLog object self.plog = PrintLog(self.keys) # Output dictionary self.res = {} # Output dictionary self.res['max'] = {'max_val': None, 'max_params': None} self.res['all'] = {'values': [], 'params': []} # Verbose self.verbose = verbose def init(self, init_points): """ Initialization method to kick start the optimization process. It is a combination of points passed by the user, and randomly sampled ones. :param init_points: Number of random points to probe. """ # Generate random points rp = [np.random.uniform(x[0], x[1], size=init_points) for x in self.bounds] # Concatenate new random points to possible existing # points from self.explore method. self.init_points += list(map(list, zip(*rp))) # Create empty list to store the new values of the function y_init = [] # Evaluate target function at all initialization # points (random + explore) for x in self.init_points: y_init.append(self.f(**dict(zip(self.keys, x)))) if self.verbose: self.plog.print_step(x, y_init[-1]) # Append any other points passed by the self.initialize method (these # also have a corresponding target value passed by the user). self.init_points += self.x_init # Append the target value of self.initialize method. y_init += self.y_init # Turn it into np array and store. 
self.X = np.asarray(self.init_points) self.Y = np.asarray(y_init) # Updates the flag self.initialized = True def explore(self, points_dict): """ Method to explore user defined points :param points_dict: :return: """ # Consistency check param_tup_lens = [] for key in self.keys: param_tup_lens.append(len(list(points_dict[key]))) if all([e == param_tup_lens[0] for e in param_tup_lens]): pass else: raise ValueError('The same number of initialization points ' 'must be entered for every parameter.') # Turn into list of lists all_points = [] for key in self.keys: all_points.append(points_dict[key]) # Take transpose of list self.init_points = list(map(list, zip(*all_points))) def initialize(self, points_dict): """ Method to introduce point for which the target function value is known :param points_dict: :return: """ for target in points_dict: self.y_init.append(target) all_points = [] for key in self.keys: all_points.append(points_dict[target][key]) self.x_init.append(all_points) def set_bounds(self, new_bounds): """ A method that allows changing the lower and upper searching bounds :param new_bounds: A dictionary with the parameter name and its new bounds """ # Update the internal object stored dict self.pbounds.update(new_bounds) # Loop through the all bounds and reset the min-max bound matrix for row, key in enumerate(self.pbounds.keys()): # Reset all entries, even if the same. self.bounds[row] = self.pbounds[key] def maximize(self, init_points=5, n_iter=25, acq='ei', kappa=2.576, xi=0.0, **gp_params): """ Main optimization method. Parameters ---------- :param init_points: Number of randomly chosen points to sample the target function before fitting the gp. :param n_iter: Total number of times the process is to repeated. Note that currently this methods does not have stopping criteria (due to a number of reasons), therefore the total number of points to be sampled must be specified. :param acq: Acquisition function to be used, defaults to Expected Improvement. 
:param gp_params: Parameters to be passed to the Scikit-learn Gaussian Process object Returns ------- :return: Nothing """ # Reset timer self.plog.reset_timer() # Set acquisition function
self.util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
1
2023-11-17 09:23:38+00:00
16k
OpenBMB/XAgent
command.py
[ { "identifier": "XAgentServerEnv", "path": "XAgentServer/application/core/envs.py", "snippet": "class XAgentServerEnv:\n \"\"\"\n XAgentServer environment variables\n if you change value of the environment variable, you need to restart \n the XAgentServer by running the following command:\n ...
import asyncio import json import os import threading import traceback import uuid import sys from contextlib import contextmanager from datetime import datetime from typing import List from colorama import Fore from apscheduler.schedulers.asyncio import AsyncIOScheduler from apscheduler.schedulers.blocking import BlockingScheduler from XAgentServer.application.core.envs import XAgentServerEnv from XAgentServer.database.connect import SessionLocal from XAgentServer.enums.status import StatusEnum from XAgentServer.exts.exception_ext import XAgentError from XAgentServer.interaction import XAgentInteraction from XAgentServer.loggers.logs import Logger from XAgentServer.models.interaction import InteractionBase from XAgentServer.models.parameter import InteractionParameter from XAgentServer.models.raw import XAgentRaw from XAgentServer.server import XAgentServer from XAgentServer.application.cruds.interaction import InteractionCRUD from XAgentServer.application.global_val import redis from command_input import CommandLineInput from XAgent.running_recorder import recorder
13,516
role="Assistant", plan=[], upload_files: List[str] = [], download_files: List[str] = [], record_dir: str = None, mode: str = "auto", max_wait_seconds: int = 600, description: str = "XAgent-Test", agent: str = "XAgent", ): self.task = task self.plan = plan self.role = role self.upload_files = upload_files self.download_files = download_files self.record_dir = record_dir # auto is supported only in cmd self.mode = "auto" self.max_wait_seconds = max_wait_seconds self.description = description self.agent = agent class CommandLine(): """ A command-line interface for interacting with XAgentServer. Attributes: env: An instance of the XAgentServer environment. client_id: A unique identifier for the client, generated as a hexadecimal UUID. date_str: The current date as a string in YYYY-MM-DD format. log_dir: The directory where the logs are stored. logger: An instance of the Logger used for logging interactions. interactionDB: A database interface for interacting with either a persistent database (SQLite, MySQL, PostgreSQL) or a local storage file, depending on the configuration of `env`. """ def __init__(self, args: CommandLineParam = None): """ Initialize the CommandLine instance. Args: args (CommandLineParam) : parameters. 
task is required, mode options: ["auto"] """ self.args = args self.client_id = uuid.uuid4().hex self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger(log_dir=self.log_dir, log_file=f"interact.log") self.logger.typewriter_log( title=f"XAgentServer is running on cmd mode", title_color=Fore.RED) self.logger.info(title=f"XAgentServer log:", title_color=Fore.RED, message=f"{self.log_dir}") self.interrupt = self.args.mode != "auto" self.init_conv_env() self.max_wait_seconds = self.args.max_wait_seconds self.scheduler = AsyncIOScheduler() self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. 
""" user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id
@contextmanager def get_db(): """ Provide a transactional scope around a series of operations. """ session = SessionLocal() try: yield session session.commit() except: session.rollback() raise finally: session.close() class CommandLineParam: """Command line parameters. Attributes: task: Task description. role: Role name (default is "Assistant"). plan: List of steps to perform (default is empty list). upload_files: List of files to upload (default is empty list). download_files: List of files to download (default is empty list). record_dir: Directory to store records (default is `None`). mode: Run mode. Can be "auto" (default is "auto"). max_wait_seconds: Maximum wait time in seconds (default is 600). description: Description of the interaction (default is "XAgent-Test"). agent: Agent name (default is "XAgent"). """ def __init__(self, task, role="Assistant", plan=[], upload_files: List[str] = [], download_files: List[str] = [], record_dir: str = None, mode: str = "auto", max_wait_seconds: int = 600, description: str = "XAgent-Test", agent: str = "XAgent", ): self.task = task self.plan = plan self.role = role self.upload_files = upload_files self.download_files = download_files self.record_dir = record_dir # auto is supported only in cmd self.mode = "auto" self.max_wait_seconds = max_wait_seconds self.description = description self.agent = agent class CommandLine(): """ A command-line interface for interacting with XAgentServer. Attributes: env: An instance of the XAgentServer environment. client_id: A unique identifier for the client, generated as a hexadecimal UUID. date_str: The current date as a string in YYYY-MM-DD format. log_dir: The directory where the logs are stored. logger: An instance of the Logger used for logging interactions. interactionDB: A database interface for interacting with either a persistent database (SQLite, MySQL, PostgreSQL) or a local storage file, depending on the configuration of `env`. 
""" def __init__(self, args: CommandLineParam = None): """ Initialize the CommandLine instance. Args: args (CommandLineParam) : parameters. task is required, mode options: ["auto"] """ self.args = args self.client_id = uuid.uuid4().hex self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger(log_dir=self.log_dir, log_file=f"interact.log") self.logger.typewriter_log( title=f"XAgentServer is running on cmd mode", title_color=Fore.RED) self.logger.info(title=f"XAgentServer log:", title_color=Fore.RED, message=f"{self.log_dir}") self.interrupt = self.args.mode != "auto" self.init_conv_env() self.max_wait_seconds = self.args.max_wait_seconds self.scheduler = AsyncIOScheduler() self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. 
""" user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id
parameter = InteractionParameter(
7
2023-10-16 03:44:57+00:00
16k
deepseek-ai/DreamCraft3D
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n ...
import os import pytorch_lightning as pl import torch.nn.functional as F import threestudio from dataclasses import dataclass, field from threestudio.models.exporters.base import Exporter, ExporterOutput from threestudio.systems.utils import parse_optimizer, parse_scheduler from threestudio.utils.base import ( Updateable, update_end_if_possible, update_if_possible, ) from threestudio.utils.config import parse_structured from threestudio.utils.misc import C, cleanup, get_device, load_module_weights, find_last_path from threestudio.utils.saving import SaverMixin from threestudio.utils.typing import * from threestudio.utils.config import load_config, parse_structured
10,840
self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_predict_epoch_end(self): pass def preprocess_data(self, batch, stage): pass """ Implementing on_after_batch_transfer of DataModule does the same. But on_after_batch_transfer does not support DP. """ def on_train_batch_start(self, batch, batch_idx, unused=0): self.preprocess_data(batch, "train") self.dataset = self.trainer.train_dataloader.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "validation") self.dataset = self.trainer.val_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "test") self.dataset = self.trainer.test_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "predict") self.dataset = self.trainer.predict_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False): pass def on_before_optimizer_step(self, optimizer): """ # some gradient-related debugging goes here, example: from lightning.pytorch.utilities import grad_norm norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): 
@dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self) -> None: self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from) self.cfg.weights = find_last_path(self.cfg.weights) if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config = parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # 
restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, resumed=False) -> None: super().__init__() self.cfg = parse_structured(self.Config, cfg) self._save_dir: Optional[str] = None self._resumed: bool = resumed self._resumed_eval: bool = False self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0} if "loggers" in cfg: self.create_loggers(cfg.loggers) self.configure() if self.cfg.weights is not None: self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules) self.post_configure() def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): state_dict, epoch, global_step = load_module_weights( weights, ignore_modules=ignore_modules, map_location="cpu" ) self.load_state_dict(state_dict, strict=False) # restore step-dependent states self.do_update_step(epoch, global_step, on_load_weights=True) def set_resume_status(self, current_epoch: int, global_step: int): # restore correct epoch and global step in eval self._resumed_eval = True self._resumed_eval_status["current_epoch"] = current_epoch self._resumed_eval_status["global_step"] = global_step @property def resumed(self): # whether from resumed checkpoint return self._resumed @property def true_global_step(self): if self._resumed_eval: return self._resumed_eval_status["global_step"] else: return self.global_step @property def true_current_epoch(self): if self._resumed_eval: return self._resumed_eval_status["current_epoch"] else: return self.current_epoch def configure(self) -> None: pass def post_configure(self) -> None: """ executed after weights are loaded """ pass def C(self, 
value: Any) -> float: return C(value, self.true_current_epoch, self.true_global_step) def configure_optimizers(self): optim = parse_optimizer(self.cfg.optimizer, self) ret = { "optimizer": optim, } if self.cfg.scheduler is not None: ret.update( { "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim), } ) return ret def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def on_train_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.train_dataloader.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) def on_validation_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.val_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_validation_step: # cleanup to save vram cleanup() def on_validation_epoch_end(self): raise NotImplementedError def test_step(self, batch, batch_idx): raise NotImplementedError def on_test_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.test_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_test_epoch_end(self): pass def predict_step(self, batch, batch_idx): raise NotImplementedError def on_predict_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.predict_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def 
on_predict_epoch_end(self): pass def preprocess_data(self, batch, stage): pass """ Implementing on_after_batch_transfer of DataModule does the same. But on_after_batch_transfer does not support DP. """ def on_train_batch_start(self, batch, batch_idx, unused=0): self.preprocess_data(batch, "train") self.dataset = self.trainer.train_dataloader.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "validation") self.dataset = self.trainer.val_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "test") self.dataset = self.trainer.test_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "predict") self.dataset = self.trainer.predict_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False): pass def on_before_optimizer_step(self, optimizer): """ # some gradient-related debugging goes here, example: from lightning.pytorch.utilities import grad_norm norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): @dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to 
override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self) -> None: self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from) self.cfg.weights = find_last_path(self.cfg.weights) if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config = parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry
prev_geometry = prev_geometry.to(get_device())
10
2023-10-23 07:40:20+00:00
16k
zju3dv/4K4D
easyvolcap/utils/gl_utils.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary tha...
from typing import TYPE_CHECKING from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL from torch import nn from enum import Enum, auto from os.path import join, dirname from typing import Dict, Union, List from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized from easyvolcap.utils.console_utils import * from easyvolcap.utils.base_utils import dotdict from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.color_utils import cm_cpu_store from easyvolcap.utils.depth_utils import depth_curve_fn from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params from easyvolcap.utils.net_utils import typed, multi_gather, create_meshgrid, volume_rendering, raw2alpha, torch_dtype_to_numpy_dtype, load_pretrained, get_bounds from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager from OpenGL.GL import shaders from pytorch3d.structures import Pointclouds, Meshes from pytorch3d.structures import Pointclouds, Meshes from cuda import cudart from cuda import cudart from cuda import cudart from easyvolcap.engine.registry import call_from_cfg from easyvolcap.utils.gaussian_utils import GaussianModel from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart import os import glm import torch import ctypes import numpy as np import sys import OpenGL.GL as gl
12,967
print(str(e).encode('utf-8').decode('unicode_escape')) raise e def init_gl_buffers(self, v: int = 0, f: int = 0): if hasattr(self, 'cu_vbo'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo)) super().init_gl_buffers(v, f) # Register vertex buffer obejct flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags)) def init_textures(self): if hasattr(self, 'cu_read_index'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower)) if hasattr(self, 'write_fbo'): gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo]) gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach]) self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W) self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W) # Register image to read from flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags)) self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags)) self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags)) self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags)) log(f'Created texture of h, w: {self.max_H}, {self.max_W}') def rasterize_generator(self, camera: Camera = None, length: int = None): 
# some implementation requires no uploading of camera front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.splat_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) # The actual multi pass rendering process happens here for pass_index in range(self.pts_per_pix): # Swap buffers to render the next pass front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \ back_fbo, back_index, back_lower, front_fbo, front_index, front_lower # Bind the read texture and bind the write render frame buffer gl.glBindTextures(0, 2, [front_index, front_lower]) # Move content from write_fbo to screen fbo if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo) gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1) gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) else: # Only clear the output 
once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures # Clear depth buffer for depth testing gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) # clear the indices buffer for later rendering and retrieving gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices yield back_fbo # give the CUDA end a chance to read from this frame buffer after rendering # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict, return_frags: bool = False, return_full: bool = False, ): """ Get all indices from the depth peeling passes Compute the vertex weight here in torch(cuda) Use the indices to pass through a compositor The backward pass should only be valid on the torch side, and it should've been enough TODO: This function is too memory intensive TODO: Performing IBR is too memory intensive """ # This the slow part, but not differentiable idx, _, _ = self.forward_idx(xyz, rad, batch) # B, H, W, K msk = idx != -1 # B, H, W, K idx = torch.where(msk, idx, 0).long() # Sample things needed for computing screen space weight H, W, K, R, T, C = get_opencv_camera_params(batch) K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype) pix_xyz = (xyz @ R.mT + T.mT) @ K.mT # B, P, 3 pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10) pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10)) # z: B, 1 * B, N, world space radius
from __future__ import annotations if TYPE_CHECKING: # fmt: off # Environment variable messaging # Need to export EGL_DEVICE_ID before trying to import egl # And we need to consider the case when we're performing distributed training # from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS if 'easyvolcap.engine' in sys.modules and (sys.modules['easyvolcap.engine'].args.type != 'gui' or sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type == 'UnitySocketViewer'): # FIXME: GLOBAL VARIABLES try: except Exception as e: log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}')) os.environ['PYOPENGL_PLATFORM'] = '' try: except Exception as e: print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:') print(f'pip install git+https://github.com/mcfletch/pyopengl') raise e # fmt: on def linearize_depth(d, n: float, f: float): # 0-1 -> -1,1 # ndc -> view return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n)) def common_opengl_options(): # Use program point size gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) # Performs face culling gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) # Performs alpha trans testing gl.glEnable(gl.GL_ALPHA_TEST) # Performs z-buffer testing gl.glEnable(gl.GL_DEPTH_TEST) # gl.glDepthMask(gl.GL_TRUE) gl.glDepthFunc(gl.GL_LEQUAL) # gl.glDepthRange(-1.0, 1.0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) # Enable some masking tests gl.glEnable(gl.GL_SCISSOR_TEST) # Enable this to correctly render points # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310 gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory. # # The second argument specifies that our pixels will be in bytes. 
# gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) def load_shader_source(file: str = 'splat.frag'): # Ideally we can just specify the shader name instead of an variable if not exists(file): file = f'{dirname(__file__)}/shaders/{file}' if not exists(file): file = file.replace('shaders/', '') if not exists(file): raise RuntimeError(f'Shader file: {file} does not exist') with open(file, 'r') as f: return f.read() def use_gl_program(program: Union[shaders.ShaderProgram, dict]): if isinstance(program, dict): # Recompile the program if the user supplied sources program = dotdict(program) program = shaders.compileProgram( shaders.compileShader(program.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER), shaders.compileShader(program.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER) ) return gl.glUseProgram(program) class Mesh: class RenderType(Enum): POINTS = 1 LINES = 2 TRIS = 3 QUADS = 4 # TODO: Support quad loading STRIPS = 5 # Helper class to render a mesh on opengl # This implementation should only be used for debug visualization # Since no differentiable mechanism will be added # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly def __init__(self, verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict(), render_type: RenderType = RenderType.TRIS, # Misc info name: str = 'mesh', filename: str = '', visible: bool = True, # Render options shade_flat: bool = False, # smooth shading point_radius: float = 0.015, render_normal: bool = False, # Storage options store_device: str = 'cpu', compute_device: str = 'cuda', vert_sizes=[3, 3, 3], # pos + color + norm # Init options est_normal_thresh: int = 100000, # Ignore unused input **kwargs, ) -> None: super().__init__() self.name = name self.visible = 
visible self.render_type = render_type self.shade_flat = shade_flat self.point_radius = point_radius self.render_normal = render_normal self.store_device = store_device self.compute_device = compute_device self.vert_sizes = vert_sizes self.est_normal_thresh = est_normal_thresh # Uniform and program self.compile_shaders() self.uniforms = dotdict() # uniform values # Before initialization self.max_verts = 0 self.max_faces = 0 # OpenGL data if filename: self.load_from_file(filename) else: self.load_from_data(verts, faces, colors, normals, scalars) def compile_shaders(self): try: self.mesh_program = shaders.compileProgram( shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER) ) self.point_program = shaders.compileProgram( shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e @property def n_verts_bytes(self): return len(self.verts) * self.vert_size * self.verts.element_size() @property def n_faces_bytes(self): return len(self.faces) * self.face_size * self.faces.element_size() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts @property def faces_data(self): # a heavy copy operation faces = self.faces.ravel().numpy() # N, 3 faces = np.asarray(faces, dtype=np.uint32, order='C') return faces @property def face_size(self): return self.render_type.value @property def vert_size(self): return sum(self.vert_sizes) def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'): verts, faces, colors, normals, scalars = self.load_data_from_file(filename) self.load_from_data(verts, faces, colors, normals, scalars) 
def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'): self.name = os.path.split(filename)[-1] verts, faces, colors, normals, scalars = None, None, None, None, None verts, faces = load_mesh(filename, device=self.store_device) if not len(faces): verts, colors, normals, scalars = load_pts(filename) self.render_type = Mesh.RenderType.POINTS else: self.render_type = Mesh.RenderType(faces.shape[-1]) # use value return verts, faces, colors, normals, scalars def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()): # Data type conversion verts = torch.as_tensor(verts) # convert to tensor if input is of other types if verts.dtype == torch.float32: pass # supports this for now elif verts.dtype == torch.float16: pass # supports this for now else: verts = verts.type(torch.float) # convert to float32 if input is of higher precision gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT self.vert_gl_types = [gl_dtype] * len(self.vert_sizes) # Prepare main mesh data: vertices and faces self.verts = torch.as_tensor(verts, device=self.store_device) self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support # Prepare colors and normals if colors is not None: self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype) else: bounds = get_bounds(self.verts[None])[0] self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0]) if normals is not None: self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype) else: self.estimate_vertex_normals() # Prepare other scalars if scalars is not None: for k, v in scalars.items(): setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok? 
# Prepare OpenGL related buffer self.update_gl_buffers() def estimate_vertex_normals(self): def est_pcd_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: pcd = Pointclouds([self.verts]).to(self.compute_device) self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim def est_tri_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: mesh = Meshes([self.verts], [self.faces]).to(self.compute_device) self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim if not len(self.verts) > self.est_normal_thresh: if self.render_type == Mesh.RenderType.TRIS: est_tri_norms() elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms() else: # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping')) self.normals = self.verts else: # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation')) self.normals = self.verts def offscreen_render(self, eglctx: "eglContextManager", camera: Camera): eglctx.resize(camera.W, camera.H) self.render(camera) def render(self, camera: Camera): if not self.visible: return # For point rendering if self.render_type == Mesh.RenderType.POINTS: gl.glUseProgram(self.point_program) self.use_gl_program(self.point_program) else: gl.glUseProgram(self.mesh_program) self.use_gl_program(self.mesh_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) if self.render_type == Mesh.RenderType.POINTS: gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices elif self.render_type == Mesh.RenderType.LINES: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.TRIS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * 
self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.QUADS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.STRIPS: gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) else: raise NotImplementedError gl.glBindVertexArray(0) def use_gl_program(self, program: shaders.ShaderProgram): use_gl_program(program) self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat") self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius") self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal") self.uniforms.H = gl.glGetUniformLocation(program, "H") self.uniforms.W = gl.glGetUniformLocation(program, "W") self.uniforms.n = gl.glGetUniformLocation(program, "n") self.uniforms.f = gl.glGetUniformLocation(program, "f") self.uniforms.P = gl.glGetUniformLocation(program, "P") self.uniforms.K = gl.glGetUniformLocation(program, "K") self.uniforms.V = gl.glGetUniformLocation(program, "V") self.uniforms.M = gl.glGetUniformLocation(program, "M") def upload_gl_uniforms(self, camera: Camera): K = camera.gl_ixt # hold the reference V = camera.gl_ext # hold the reference M = glm.identity(mat4) P = K * V * M gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat) gl.glUniform1f(self.uniforms.point_radius, self.point_radius) gl.glUniform1i(self.uniforms.render_normal, self.render_normal) gl.glUniform1i(self.uniforms.H, camera.H) # o2w gl.glUniform1i(self.uniforms.W, camera.W) # o2w gl.glUniform1f(self.uniforms.n, camera.n) # o2w gl.glUniform1f(self.uniforms.f, camera.f) # o2w gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, 
glm.value_ptr(V)) # w2c gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w def update_gl_buffers(self): # Might be overwritten self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0, len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated if hasattr(self, 'verts'): gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference if hasattr(self, 'faces'): gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data) def resize_buffers(self, v: int = 0, f: int = 0): if v > self.max_verts or f > self.max_faces: if v > self.max_verts: self.max_verts = v if f > self.max_faces: self.max_faces = f self.init_gl_buffers(v, f) def init_gl_buffers(self, v: int = 0, f: int = 0): # This will only init the corresponding buffer object n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes # Housekeeping if hasattr(self, 'vao'): gl.glDeleteVertexArrays(1, [self.vao]) gl.glDeleteBuffers(2, [self.vbo, self.ebo]) self.vao = gl.glGenVertexArrays(1) self.vbo = gl.glGenBuffers(1) self.ebo = gl.glGenBuffers(1) gl.glBindVertexArray(self.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao cumsum = 0 for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)): gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float gl.glEnableVertexAttribArray(i) cumsum += s if n_faces_bytes > 0: # Some implementation has no faces, we 
dangerously ignore ebo here, assuming they will never be used gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) gl.glBindVertexArray(0) def render_imgui(self): pass class Quad(Mesh): # A shared texture for CUDA (pytorch) and OpenGL # Could be rendererd to screen using blitting or just drawing a quad def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip self.use_cudagl = use_cudagl self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings _type self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.update_gl_buffers() self.compile_shaders() self.max_H, self.max_W = H, W self.H, self.W = H, W self.compose = compose self.compose_power = compose_power self.init_texture() @property def n_faces_bytes(self): return 0 def use_gl_program(self, program: shaders.ShaderProgram): super().use_gl_program(program) self.uniforms.tex = gl.glGetUniformLocation(program, 'tex') gl.glUseProgram(self.quad_program) # use a different program gl.glUniform1i(self.uniforms.tex, 0) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_texture() def init_texture(self): if hasattr(self, 
'cu_tex'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex)) if hasattr(self, 'fbo'): gl.glDeleteFramebuffers(1, [self.fbo]) gl.glDeleteTextures(1, [self.tex]) # Init the texture to be blit onto the screen self.tex = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Init the framebuffer object if explicit blitting is used (slower than drawing quad) self.fbo = gl.glGenFramebuffers(1) old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo) if self.use_cudagl: if self.compose: # Both reading and writing of this resource is required flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone else: flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags)) def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0): assert self.use_cudagl, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad" w = w or self.W h = h or self.H if image.shape[-1] == 3: image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0)) if self.compose: """ Blit current framebuffer 
to this texture (self.tex) Read content of this texture into a cuda buffer Perform alpha blending based on the frame's alpha channel Copy the blended image back into the texture (self.tex) """ old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0 gl.glBlitFramebuffer(x, y, w, h, x, y, w, h, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old) buffer = torch.empty_like(image) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst w * 4 * buffer.element_size(), # dpitch cu_tex_arr, # src x * 4 * image.element_size(), # wOffset y, # hOffset w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes) h, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]]) alpha = image[..., -1:] / 255 image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int image[..., -1:] = buffer[..., -1:] + image[..., -1:] image = image.clip(0, 255) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr, x * 4 * image.element_size(), y, image.data_ptr(), w * 4 * image.element_size(), # differently sized w * 4 * image.element_size(), # rgba, should do a composition first h, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) def upload_to_texture(self, ptr: np.ndarray): H, W = ptr.shape[:2] H, W = min(self.H, H), min(self.W, W) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down? 
@property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def render(self, camera: Camera = None): self.draw() # no uploading needed def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ Upload the texture instead of the camera This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(x, y, w, h) gl.glScissor(x, y, w, h) # only render in this small region of the viewport gl.glUseProgram(self.quad_program) # use a different program gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) gl.glBindVertexArray(0) # Some house keepings gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0 gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped x, y, x + w, y + h, # the height is flipped gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old) class UQuad(Mesh): """ Responsible for initializing textures with a single value or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing) Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte """ def __init__(self): self.n_blit_values = 3 self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.max_verts, self.max_faces = 0, 0 self.verts = 
torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.compile_shaders() self.uniforms = dotdict() # uniform values self.use_gl_programs(self.quad_program) self.update_gl_buffers() @property def n_faces_bytes(self): return 0 @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def use_gl_programs(self, program: shaders.ShaderProgram): for i in range(self.n_blit_values): self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}') for i in range(self.n_blit_values): self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}') gl.glUseProgram(self.program) # use a different program for i in range(self.n_blit_values): self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}') gl.glUniform1i(self.uniforms[f'tex{i}'], i) def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]): for i, v in enumerate(values): v = vec4(v) # HACK: Hold the reference for this upload gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v)) # as float array for i, v in enumerate(use_texs): gl.glUniform1i(self.uniforms[f'use_tex{i}'], v) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): """ This function will render 'value' to the currently bound framebuffer, up to six outputs """ old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) gl.glUseProgram(self.quad_program) self.upload_gl_uniforms(values, use_texs) # should be a noop # Prepare to render to textures 
gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) # number of vertices gl.glBindVertexArray(old_vao) gl.glUseProgram(old_prog) class DQuad(UQuad): def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): old_function = gl.glGetIntegerv(gl.GL_DEPTH_FUNC) gl.glDepthFunc(gl.GL_ALWAYS) super().draw(values, use_texs) gl.glDepthFunc(old_function) def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F): # Prepare for write frame buffers color_buffer = gl.glGenTextures(1) depth_upper = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
    gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0))  # 16 * 4
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return color_buffer, depth_upper, depth_lower, depth_attach, fbo


def hareward_peeling_framebuffer(H: int, W: int):
    """
    Create an offscreen FBO for index-based depth peeling with an R32I point-index
    attachment, an R32F depth-lower attachment, and a depth attachment.
    Returns (index_buffer, depth_lower, depth_attach, fbo) texture/FBO ids.
    NOTE(review): 'hareward' looks like a typo for 'hardware', but callers use this
    name, so it is kept.
    """
    # Prepare for write frame buffers
    index_buffer = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, storereference in fb

    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return index_buffer, depth_lower, depth_attach, fbo


class Gaussian(Mesh):
    """Renders a pretrained GaussianModel to the screen through a CUDA-GL Quad."""

    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.npz',
                 gaussian_cfg: dotdict = dotdict(),
                 quad_cfg: dotdict = dotdict(),
                 render_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        # Housekeeping
        super().__init__(**kwargs)
        self.name = split(filename)[-1]

        # Init Gaussian related models, for now only the first gaussian model is supported
        if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'):
            # Load from GaussianTSampler
            pretrained, _ = load_pretrained(filename)  # loaded model and updated path (maybe)
            pretrained = pretrained.model
            state_dict = dotdict()
            for k, v in pretrained.items():
                # Only keep parameters of the first point cloud in the sampler
                if k.startswith('sampler.pcds.0'):
                    state_dict[k.replace('sampler.pcds.0.', '')] = v

            # Load the parameters into the gaussian model
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_state_dict(state_dict)  # load the first gaussian model
            self.gaussian_model.cuda()  # move the parameters to GPU
        elif filename.endswith('.ply'):
            # Load raw GaussianModel
            pass  # NOTE(review): .ply loading is not implemented yet; silently does nothing
        else:
            raise NotImplementedError

        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)

        # Other configurations
        self.render_depth = render_depth
        self.dpt_cm = dpt_cm

    # Disabling initialization: this mesh owns no GL vertex data of its own,
    # all drawing goes through self.quad
    def load_from_file(self, *args, **kwargs):
        pass

    def load_from_data(self, *args, **kwargs):
        pass

    def compile_shaders(self):
        pass

    def update_gl_buffers(self):
        pass

    def resize_textures(self, H: int, W: int):
        # Forward resizes to the backing quad texture
        self.quad.resize_textures(H, W)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Render the gaussians for `camera` and blit the result to the screen quad."""
        # Perform actual gaussian rendering
        batch = to_cuda(camera.to_batch())
        rgb, acc, dpt = self.gaussian_model.render(batch)

        if self.render_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4

        # Copy rendered tensor to screen (flip vertically for GL's lower-left origin)
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()


class Splat(Mesh):
    """Point splatting renderer using multi-pass depth peeling over OpenGL."""

    def __init__(self,
                 *args,
                 H: int = 512,
                 W: int = 512,
                 tex_dtype: str = torch.half,
                 pts_per_pix: int = 24,  # render less for the static background since we're only doing a demo
                 blit_last_ratio: float = 0.0,
                 volume_rendering: bool = True,
                 radii_mult_volume: float = 1.00,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 radii_mult_solid: float = 0.85,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 point_smooth: bool = True,
                 alpha_blending: bool = True,
                 **kwargs):
        kwargs = dotdict(kwargs)
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # xyz, rgb, radius, alpha
        self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype
        self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F

        super().__init__(*args, **kwargs)
        self.use_gl_program(self.splat_program)

        self.pts_per_pix = pts_per_pix
        self.blit_last_ratio = blit_last_ratio
        self.volume_rendering = volume_rendering
        self.radii_mult_volume = radii_mult_volume
        self.radii_mult_solid = radii_mult_solid
        self.point_smooth = point_smooth
        self.alpha_blending = alpha_blending

        self.max_H, self.max_W = H, W
        self.H, self.W = H, W
        self.init_textures()

    @property
def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') # this should only be invoked once return verts def use_gl_program(self, program: shaders.ShaderProgram): super().use_gl_program(program) # Special controlling variables self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending') self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth') self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult') # Special rendering variables self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index') self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color') self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper') self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower') gl.glUniform1i(self.uniforms.read_color, 0) gl.glUniform1i(self.uniforms.read_upper, 1) gl.glUniform1i(self.uniforms.read_lower, 2) def compile_shaders(self): try: self.splat_program = shaders.compileProgram( shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER) ) self.usplat_program = shaders.compileProgram( shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def rasterize(self, camera: Camera = None, length: int = None): if self.volume_rendering: return self.rasterize_volume(camera, length) else: return self.rasterize_solid(camera, length) def rasterize_volume(self, camera: Camera = None, length: int = None): # some implementation requires no uploading of camera """ Let's try to analyze what's happening here We want to: 1. 
Render the front-most color to color buffer 2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer 3. Render the front-most depth + some small margin to a depth lower limit buffer 4. Switch between the render target and sampling target 5. Use the previous rendered color, depth upper limit and lower limit as textures 6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard 7. UNUSED: When current depth is larger than the upper limit, it will probabily not contribute much to final results, discard 8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly 9. When the point coordinates falls out of bound of the current sphere, dicard (this could be optimized with finutining in rectangle) 10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back) Required cleanup checklist: 1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this 2. 
Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader """ front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9]) gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9]) gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.splat_program) # TODO: Implement this with a mapping and a lazy modification self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) # The actual multi pass rendering process happens here for pass_index in range(self.pts_per_pix): # Swap buffers to render the next pass front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \ back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower # Bind the read texture and bind the write render frame buffer gl.glBindTextures(0, 3, [front_color, front_upper, front_lower]) # Move content from write_fbo to screen fbo if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo) 
gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo) for i in range(3): gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i) gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2]) # Clear depth buffer for depth testing gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing gl.glUniform1i(self.uniforms.pass_index, pass_index) # pass index # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return back_fbo def upload_gl_uniforms(self, camera: Camera): super().upload_gl_uniforms(camera) gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth) gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending) if self.volume_rendering: gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume) # radii mult else: gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid) # radii mult def rasterize_solid(self, camera: Camera = None, length: int = None): # Only clear the output once back_fbo = self.write_fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # color # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) # depth upper gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) # depth lower gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.usplat_program) self.upload_gl_uniforms(camera) gl.glUniform1i(self.uniforms.pass_index, 0) # pass index 
gl.glBindVertexArray(self.vao) # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return back_fbo def show(self, back_fbo: int): # Move content from write_fbo to screen fbo gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0) # render the final content onto screen gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) def render(self, camera): if not self.visible: return self.show(self.rasterize(camera)) def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_textures() def init_textures(self): if hasattr(self, 'write_fbo'): gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo]) gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach]) self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype) self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype) log(f'Created texture of h, w: {self.max_H}, {self.max_W}') class HardwareRendering(Splat): def __init__(self, dtype=torch.half, **kwargs, ): self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT 
        super().__init__(**kwargs,
                         blit_last_ratio=0.90,
                         vert_sizes=[3, 3, 1, 1],
                         )  # verts, color, radius, alpha

    @property
    def verts_data(self):  # a heavy copy operation
        # NOTE(review): unlike Quad.verts_data this skips .detach().cpu(); presumably
        # verts live on CPU here since the GPU path uploads through forward() — confirm
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """Create the GL vertex buffer and register it as a CUDA graphics resource."""
        if hasattr(self, 'cu_vbo'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))

    def init_textures(self):
        """Create the peeling textures and register color/lower-depth pairs with CUDA."""
        if hasattr(self, 'cu_read_color'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        super().init_textures()

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags))
        self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict):
        """
        Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map.

        Args:
            xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points.
            rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points.
            rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points.
            occ (torch.Tensor): A tensor of shape (B, N, 1) containing the opacity of the points.
            batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch.

        Returns:
            A tuple (rgb_map, acc_map, dpt_map): the rendered RGB image (1, H, W, 3),
            the accumulated alpha image (1, H, W, 1), and the depth map (1, H, W, 1).

        The input tensors are concatenated, copied into the registered GL vertex buffer
        via CUDA (device-to-device), rasterized with depth peeling, and the resulting
        color/depth textures are copied back into torch tensors, flipped to image
        convention and split into rgb/acc/dpt.
        """
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember previous viewport size to restore later
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data
        data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

        # Prepare the output
        # !: BATCH
        rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(),  # dst
                                                             W * 4 * rgb_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * 4 * rgb_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * 1 * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * 1 * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

        # Ouput reshaping (GL renders with a lower-left origin, flip rows back)
        rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1)
        rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:]
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # background pixels get the far depth

        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return rgb_map, acc_map, dpt_map


class HardwarePeeling(Splat):
    """Depth peeling that yields per-pass point-index buffers for torch-side compositing."""

    def __init__(self,
                 dtype=torch.float,
                 **kwargs):
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        super().__init__(**kwargs,
                         blit_last_ratio=-10.0,
                         vert_sizes=[3, 1],
                         )  # verts, radius, index

        # from pytorch3d.renderer import AlphaCompositor
        # self.compositor = AlphaCompositor()  # this the key to convergence, this is differentiable

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleaved (xyz, radius) buffer, C-contiguous, in this renderer's dtype
        verts = torch.cat([self.verts, self.radius], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def use_gl_program(self, program):
        """Set up the index-peeling program's samplers (index on unit 0, lower depth on unit 1)."""
        super().use_gl_program(program)
        gl.glUseProgram(self.splat_program)  # use a different program
        self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        gl.glUniform1i(self.uniforms.read_index, 0)
        gl.glUniform1i(self.uniforms.read_lower, 1)

    def upload_gl_uniforms(self, camera: Camera):
        # No extra uniforms beyond the base camera ones for this program
        super().upload_gl_uniforms(camera)

    def compile_shaders(self):
        """Compile and link the index-splatting shader program."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER),  # use the pass through quad shader
                shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """Create the GL vertex buffer and register it as a CUDA graphics resource."""
        if hasattr(self, 'cu_vbo'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))

    def init_textures(self):
        """(Re)create the double-buffered index-peeling FBOs and register them with CUDA."""
        if hasattr(self, 'cu_read_index'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        if hasattr(self, 'write_fbo'):
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach])

        self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
        self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags))
        self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')

    def rasterize_generator(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """
        Generator form of the peeling loop: after each pass, yields the FBO that holds
        this pass's index/depth so the CUDA side can read it before the next pass.
        """
        front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower
        back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass
            front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \
                back_fbo, back_index, back_lower, front_fbo, front_index, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 2, [front_index, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
                gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
            else:
                # Only clear the output once
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures

            # Clear depth buffer for depth testing
            gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # clear the indices buffer for later rendering and retrieving
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

            yield back_fbo  # give the CUDA end a chance to read from this frame buffer after rendering

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict,
                return_frags: bool = False,
                return_full: bool = False,
                ):
        """
        Get all indices from the depth peeling passes
        Compute the vertex weight here in torch(cuda)
        Use the indices to pass through a compositor
        The backward pass should only be valid on the torch side, and it should've been enough

        TODO: This function is too memory intensive
        TODO: Performing IBR is too memory intensive
        """
        # This the slow part, but not differentiable
        idx, _, _ = self.forward_idx(xyz, rad, batch)  # B, H, W, K
        msk = idx != -1  # B, H, W, K
        idx = torch.where(msk, idx, 0).long()  # replace sentinel -1 with 0 so gather is safe; msk remembers validity

        # Sample things needed for computing screen space weight
        H, W, K, R, T, C = get_opencv_camera_params(batch)
        K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype)
        pix_xyz = (xyz @ R.mT + T.mT) @ K.mT  # B, P, 3
        pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10)  # perspective divide to pixel coordinates
        pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10))  # z: B, 1 * B, N, world space radius
mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2) # B, HWK, 2 -> B, H, W, K, 2
10
2023-10-17 04:48:46+00:00
16k
0xbitches/sd-webui-lcm
scripts/main.py
[ { "identifier": "LCMScheduler", "path": "lcm/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n This model ...
from concurrent.futures import ThreadPoolExecutor from pathlib import Path from typing import Optional from lcm.lcm_scheduler import LCMScheduler from lcm.lcm_pipeline import LatentConsistencyModelPipeline from lcm.lcm_i2i_pipeline import LatentConsistencyModelImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from modules import script_callbacks from PIL import Image, PngImagePlugin import uuid import modules.scripts as scripts import modules.shared import os import random import time import numpy as np import gradio as gr import torch import cv2
11,633
return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32") scheduler = LCMScheduler.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler") pipe = LatentConsistencyModelPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", scheduler = scheduler, safety_checker = None) if use_fp16: pipe.to(torch_device=selected_device, torch_dtype=torch.float16) else: pipe.to(torch_device=selected_device, torch_dtype=torch.float32) # Windows does not support torch.compile for now if os.name != 'nt' and use_torch_compile: pipe.unet = torch.compile(pipe.unet, mode='max-autotune') start_time = time.time() result = pipe( prompt=prompt, 
width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images, original_inference_steps=50, output_type="pil", device = selected_device ).images paths = save_images(result, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps}) elapsed_time = time.time() - start_time print("LCM inference time: ", elapsed_time, "seconds") return paths, seed def generate_i2i( prompt: str, image: PipelineImageInput = None, strength: float = 0.8, seed: int = 0, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True), width: Optional[int] = 512, height: Optional[int] = 512, ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32") scheduler = LCMScheduler.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler") pipe = 
LatentConsistencyModelPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", scheduler = scheduler, safety_checker = None) if use_fp16: pipe.to(torch_device=selected_device, torch_dtype=torch.float16) else: pipe.to(torch_device=selected_device, torch_dtype=torch.float32) # Windows does not support torch.compile for now if os.name != 'nt' and use_torch_compile: pipe.unet = torch.compile(pipe.unet, mode='max-autotune') start_time = time.time() result = pipe( prompt=prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images, original_inference_steps=50, output_type="pil", device = selected_device ).images paths = save_images(result, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps}) elapsed_time = time.time() - start_time print("LCM inference time: ", elapsed_time, "seconds") return paths, seed def generate_i2i( prompt: str, image: PipelineImageInput = None, strength: float = 0.8, seed: int = 0, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True), width: Optional[int] = 512, height: Optional[int] = 512, ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained(
2
2023-10-22 11:53:48+00:00
16k
kylesargent/ZeroNVS
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,662
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation 
= None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation 
= None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config )
self.feature_network = get_mlp(
8
2023-10-24 19:02:44+00:00
16k
princeton-nlp/LLM-Shearing
llmshearing/models/composer_pythia.py
[ { "identifier": "L0Module", "path": "llmshearing/models/l0_module.py", "snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.b...
import math import torch import torch.nn as nn from typing import List, Optional, Tuple from einops import rearrange from omegaconf import DictConfig from torch.nn import functional as F from transformers.pytorch_utils import (find_pruneable_heads_and_indices, prune_linear_layer) from llmshearing.models.l0_module import L0Module from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
10,948
head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, 
bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits) self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn self.out_proj = nn.Linear(self.d_model, self.d_model, device=device, bias=True) self.out_proj._is_residual = True # type: ignore self.rotary_ndims = int(self.head_dim * cfg.rotary_pct) self.rotary_emb = RotaryEmbedding(self.rotary_ndims, max_position_embeddings=cfg.max_seq_len, device=device) def prune_params(self, zs_block): head_z = None; head_layer_z = None; hidden_z = None; qk_head_dim_z = None; vo_head_dim_z = None if "head_z" in zs_block: head_z = zs_block["head_z"].squeeze() if "head_layer_z" in zs_block: head_layer_z = zs_block["head_layer_z"].squeeze() if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"].squeeze() # update params # if head_z is not None: head_z_for_update = torch.repeat_interleave(head_z, self.head_dim) start_index = torch.arange(0, self.n_heads * 3, 3) + 2 end_index = start_index + 1 index = torch.cat([torch.arange(i, j) for i, j in zip(start_index * self.head_dim, end_index * self.head_dim)]) self.query_key_value.weight.data[index, :] = \ self.query_key_value.weight.data.transpose(0, 1)[:, index].mul(head_z_for_update).transpose(0, 1) self.query_key_value.bias.data[index] = \ self.query_key_value.bias.data[index].mul(head_z_for_update) if head_layer_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(head_layer_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(head_layer_z) if hidden_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.out_proj.bias.data = 
self.out_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" Head hidden: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.query_key_value.parameters()).dtype == torch.float16 self.query_key_value = prune_linear_layer(self.query_key_value, remaining_index, dim=1) self.out_proj = prune_linear_layer(self.out_proj, remaining_index) if half: self.query_key_value.half() self.out_proj.half()
class ComposerMosaicPythia(ComposerMosaicLlama): def __init__(self, cfg): super().__init__(cfg) self.model = PythiaModel(cfg) class CoFiLayerNorm(torch.nn.LayerNorm): def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None: super().__init__(normalized_shape, eps, elementwise_affine, device) def forward(self, input, hidden_z=None): if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] compressed_input = torch.index_select( input, dim=-1, index=remaining_index) compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] normalized_shape = len(remaining_index) normed_input = F.layer_norm( compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps) output = input.clone() normed_input = normed_input.to(output.dtype) output[..., remaining_index] = normed_input else: output = F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps) return output def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index]) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index)) self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index)) self.normalized_shape = (len(remaining_index),) class PythiaEmbedding(nn.Embedding): def forward(self, input, hidden_z=None): embeddings = super().forward(input) if hidden_z is not None: embeddings = embeddings.mul(hidden_z) return embeddings def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] self.weight.data = self.weight.data.mul(hidden_z) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone()) self.embedding_dim = len(remaining_index) print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}") class PythiaModel(nn.Module): def __init__(self, cfg: 
DictConfig): super().__init__() print(f'Tried to build Pythia model with cfg.name={cfg.name}') self.cfg = cfg ### added ### self.l0_module = None if getattr(self.cfg, "l0_module", None) is not None: self.l0_module = L0Module(self.cfg, device=cfg.init_device) ############# layernorm_class = CoFiLayerNorm self.attn_impl = cfg.attn_impl self.embedding_fraction = cfg.get('embedding_fraction', 1) assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!' self.transformer = nn.ModuleDict({ "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device), }) self.transformer.update({ 'blocks': nn.ModuleList([ PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers) ]) }) self.transformer.update({ "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False), }) self.transformer.update({ "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config }) self.is_causal = True if cfg.get('verbose') and cfg.get('verbose') > 2: print(self) def prune_params(self, zs=None): # TODO if zs is None: self.l0_module.eval() zs = self.l0_module(calculate_lagrangian=False) # wte as well :) # ln_f if hidden states are to be pruned if "hidden_z" in zs: hidden_z = zs["hidden_z"] remaining_index = torch.where(~hidden_z.eq(0))[0] self.transformer.ln_f.prune_params(hidden_z) self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z) self.transformer.wte.weight = torch.nn.parameter.Parameter( self.transformer.wte.weight.index_select(1, remaining_index).clone()) self.transformer.wte.embedding_dim = len(remaining_index) # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z) half = self.transformer.output.weight.data.dtype == torch.float16 self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1) if half: self.transformer.output = 
self.transformer.output.half() for i, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, i) block.prune_params(zs_block) def get_zs_block(self, zs, block_idx): zs_block = {} if zs is not None: for key in zs: if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"] else: zs_block[key] = zs[key][block_idx] return zs_block def forward( self, input_ids: torch.LongTensor, key_padding_mask: Optional[torch.ByteTensor] = None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, pruned_steps: int = 0, retain_grad: bool = False, **zs,): S = input_ids.size(1) assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!" tok_emb = self.transformer.wte(input_ids) if "hidden_z" in zs: tok_emb = tok_emb.mul(zs["hidden_z"]) x = tok_emb attn_bias = None # only consider the flash attention case attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb) l0_output = None if self.l0_module is not None: assert zs == {}, "zs should be empty when using L0Module" zs = self.l0_module(calculate_lagrangian=False, pruned_steps=pruned_steps) for b_idx, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, b_idx) past_key_value = past_key_values[ b_idx] if past_key_values is not None else None x, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=self.is_causal, attention_mask=attention_mask, retain_grad=retain_grad, **zs_block ) if past_key_values is not None: past_key_values[b_idx] = past_key_value x = self.transformer.ln_f(x, hidden_z=zs.get("hidden_z", None)) logits = self.transformer.output(x) if self.l0_module is not None: l0_output = self.l0_module(calculate_lagrangian=True, pruned_steps=pruned_steps) return {"logits": logits, "l0_output": l0_output, "zs": zs} def param_init_fn(self, module): pass def fsdp_wrap_fn(self, module): return isinstance(module, 
PythiaBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module): return isinstance(module, PythiaBlock) class PythiaBlock(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() layernorm_class = CoFiLayerNorm # TODO: CoFiLayerNorm,RMSLayerNorm self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.attn = PythiaAttention(cfg, device) self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.mlp = PythiaMLP(cfg, device) self.use_parallel_residual = cfg.get('use_parallel_residual', False) # TODO: add to config def prune_params(self, zs_block): self.attn.prune_params(zs_block) self.mlp.prune_params(zs_block) if self.attn.query_key_value is None: self.ln_1 = None if self.mlp.up_proj is None: self.ln_2 = None if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"] if self.ln_1 is not None: self.ln_1.prune_params(hidden_z) if self.ln_2 is not None: self.ln_2.prune_params(hidden_z) def forward( self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None, is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False, head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, 
qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits) self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn self.out_proj = nn.Linear(self.d_model, self.d_model, device=device, bias=True) self.out_proj._is_residual = True # type: ignore self.rotary_ndims = int(self.head_dim * cfg.rotary_pct) self.rotary_emb = 
RotaryEmbedding(self.rotary_ndims, max_position_embeddings=cfg.max_seq_len, device=device) def prune_params(self, zs_block): head_z = None; head_layer_z = None; hidden_z = None; qk_head_dim_z = None; vo_head_dim_z = None if "head_z" in zs_block: head_z = zs_block["head_z"].squeeze() if "head_layer_z" in zs_block: head_layer_z = zs_block["head_layer_z"].squeeze() if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"].squeeze() # update params # if head_z is not None: head_z_for_update = torch.repeat_interleave(head_z, self.head_dim) start_index = torch.arange(0, self.n_heads * 3, 3) + 2 end_index = start_index + 1 index = torch.cat([torch.arange(i, j) for i, j in zip(start_index * self.head_dim, end_index * self.head_dim)]) self.query_key_value.weight.data[index, :] = \ self.query_key_value.weight.data.transpose(0, 1)[:, index].mul(head_z_for_update).transpose(0, 1) self.query_key_value.bias.data[index] = \ self.query_key_value.bias.data[index].mul(head_z_for_update) if head_layer_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(head_layer_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(head_layer_z) if hidden_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" Head hidden: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.query_key_value.parameters()).dtype == torch.float16 self.query_key_value = prune_linear_layer(self.query_key_value, remaining_index, dim=1) self.out_proj = prune_linear_layer(self.out_proj, remaining_index) if half: self.query_key_value.half() self.out_proj.half()
to_prune_heads = turn_head_z(head_z, head_layer_z)
3
2023-10-16 12:26:08+00:00
16k
hkchengrex/Cutie
cutie/inference/inference_core.py
[ { "identifier": "MemoryManager", "path": "cutie/inference/memory_manager.py", "snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n ...
from typing import List, Optional, Iterable, Dict from omegaconf import DictConfig from cutie.inference.memory_manager import MemoryManager from cutie.inference.object_manager import ObjectManager from cutie.inference.image_feature_store import ImageFeatureStore from cutie.model.cutie import CUTIE from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate import logging import numpy as np import torch import torch.nn.functional as F
11,064
shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, 
end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', 
align_corners=False)[0] self.curr_ti += 1
log = logging.getLogger() class InferenceCore: def __init__(self, network: CUTIE, cfg: DictConfig, *, image_feature_store: ImageFeatureStore = None): self.network = network self.cfg = cfg self.mem_every = cfg.mem_every stagger_updates = cfg.stagger_updates self.chunk_size = cfg.chunk_size self.save_aux = cfg.save_aux self.max_internal_size = cfg.max_internal_size self.flip_aug = cfg.flip_aug self.curr_ti = -1 self.last_mem_ti = 0 # at which time indices should we update the sensory memory if stagger_updates >= self.mem_every: self.stagger_ti = set(range(1, self.mem_every + 1)) else: self.stagger_ti = set( np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int)) self.object_manager = ObjectManager() self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager) if image_feature_store is None: self.image_feature_store = ImageFeatureStore(self.network) else: self.image_feature_store = image_feature_store self.last_mask = None def clear_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager) def clear_non_permanent_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_non_permanent_memory() def clear_sensory_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_sensory_memory() def update_config(self, cfg): self.mem_every = cfg['mem_every'] self.memory.update_config(cfg) def _add_memory(self, image: torch.Tensor, pix_feat: torch.Tensor, prob: torch.Tensor, key: torch.Tensor, shrinkage: torch.Tensor, selection: torch.Tensor, *, is_deep_update: bool = True, force_permanent: bool = False) -> None: """ Memorize the given segmentation in all memory stores. The batch dimension is 1 if flip augmentation is not used. 
image: RGB image, (1/2)*3*H*W pix_feat: from the key encoder, (1/2)*_*H*W prob: (1/2)*num_objects*H*W, in [0, 1] key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W selection can be None if not using long-term memory is_deep_update: whether to use deep update (e.g. with the mask encoder) force_permanent: whether to force the memory to be permanent """ if prob.shape[1] == 0: # nothing to add log.warn('Trying to add an empty object mask to memory!') return if force_permanent: as_permanent = 'all' else: as_permanent = 'first' self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids) msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask( image, pix_feat, self.memory.get_sensory(self.object_manager.all_obj_ids), prob, deep_update=is_deep_update, chunk_size=self.chunk_size, need_weights=self.save_aux) self.memory.add_memory(key, shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. 
key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. 
The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1
image, self.pad = pad_divide_by(image, 16)
4
2023-10-19 17:49:24+00:00
16k
stanford-oval/WikiChat
benchmark/scripts/prepare_for_scale.py
[ { "identifier": "DialogueTurn", "path": "pipelines/dialog_turn.py", "snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = Non...
import argparse import json import pandas as pd import sys from typing import List from tqdm import tqdm from pipelines.dialog_turn import DialogueTurn from pipelines.chatbot import Chatbot from pipelines.utils import make_parent_directories from pipelines.pipeline_arguments import ( add_pipeline_arguments, check_pipeline_arguments, ) from llm.load_prompt import _fill_template from llm.global_variables import get_total_cost
12,722
return evidence_texts, evidence_titles # TODO parallelize this function def format_simulated_data(args): simulation_pipeline = args.pipeline args.pipeline = "generate_and_correct" # needed to use claim_splitter from chatbot chatbot = Chatbot(args) dlg_history = [] dlg_claims = set() make_parent_directories(args.output_file) content_list = [] metadata_list = [] example_id = 0 turn_num = 0 dlg_topic = "" with open(args.input_file, "r") as f: for line in tqdm(f.readlines(), desc="Lines"): line_split = line.split("): ") if line_split[0].startswith("User"): cur_dlg_turn = DialogueTurn(user_utterance=line_split[1].strip()) elif line_split[0].startswith("Chatbot"): turn_num += 1 cur_dlg_turn.agent_utterance = line_split[1].strip() claims = chatbot.claim_splitter.split_claim( dialog_history=dlg_history, new_user_utterance=cur_dlg_turn.user_utterance, current_agent_utterance=cur_dlg_turn.agent_utterance, system_parameters={"engine": "gpt-4"}, dialog_topic=dlg_topic, ) # print(claims) dlg_history.append(cur_dlg_turn) ret_output = chatbot._retrieve_evidences(claims, top_p=0.7) for claim_idx, evidences in ret_output.items(): claim_idx = int(claim_idx) if claims[claim_idx][0] in dlg_claims: # print("Skipping duplicate claim") continue claim = claims[claim_idx][0] evidence_texts = [e[2] for e in evidences] evidence_titles = [e[0] for e in evidences] evidence_texts, evidence_titles = highlight_keywords_from_claim( claim, evidence_texts, evidence_titles ) turn_params = { "user_utterance": cur_dlg_turn.user_utterance, "dialog_history": dlg_history, "claim": claim, "evidence_titles": evidence_titles, "evidence_texts": evidence_texts, } content, _ = _fill_template(args.scale_template_file, turn_params) # print(content) # exit() content_list.append(content) metadata_list.append( json.dumps( { "pipeline": simulation_pipeline, "subset": args.subset, "engine": args.engine, # "atlas" "id": str(example_id), "turn_num": str(turn_num), "agent_utterance": cur_dlg_turn.agent_utterance, } ) ) 
example_id += 1 for c in claims: dlg_claims.add(c[0]) elif line.startswith("====="): turn_num = 0 dlg_history = [] dlg_claims = set() elif line.startswith("Topic:"): dlg_topic = line[7:].strip() # print("dialog topic = ", dlg_topic) else: raise ValueError("ERROR: Unknown line type %s" % line) df = pd.DataFrame({"text": content_list, "metadata": metadata_list}) df.to_csv(args.output_file) if __name__ == "__main__": parser = argparse.ArgumentParser() add_pipeline_arguments(parser) parser.add_argument( "--input_file", type=str, required=True, help="Where to read the partial conversations from, with the last line of each conversation being the model response.", ) parser.add_argument( "--output_file", type=str, required=True, help="Where to write the outputs." ) parser.add_argument( "--scale_template_file", type=str, default="benchmark/prompts/scale_factuality.prompt", help="prompt file to generate input data file for scale ai human evaluation.", ) parser.add_argument( "--subset", type=str, required=True, help="The subset of the benchamrk.", ) args = parser.parse_args()
sys.path.insert(0, "./") stopwords = [ "the", "a", "and", "or", "then", "he", "she", "it", "they", "you", "to", "me", "on", "was", "at", "in", "was", "of", "for", "is", "are", "were", "not", "be", "had", "I", "would", "will", ] stopwords += [s.capitalize() for s in stopwords] def highlight_keywords_from_claim( claim: str, evidence_texts: List[str], evidence_titles: List[str] ): claim_keywords = [ w for w in claim.replace(".", " ") .replace(",", " ") .replace("?", " ") .replace('"', " ") .replace("'", " ") .split(" ") if w not in stopwords and len(w) > 0 ] evidence_texts = [ e.replace("$", "\$").replace("–", "-") for e in evidence_texts ] # escape $ to work with Scale's UI for prefix in [" ", "\n", "(", '"']: for suffix in [" ", ".", ",", ";", "?", ")", "\n", '"']: for i in range(len(evidence_texts)): for k in claim_keywords: evidence_texts[i] = evidence_texts[i].replace( prefix + k + suffix, prefix + '<strong style="background-color:beige;"">' + k + "</strong>" + suffix, ) evidence_titles[i] = evidence_titles.replace( prefix + k + suffix, prefix + '<strong style="background-color:beige;"">' + k + "</strong>" + suffix, ) return evidence_texts, evidence_titles # TODO parallelize this function def format_simulated_data(args): simulation_pipeline = args.pipeline args.pipeline = "generate_and_correct" # needed to use claim_splitter from chatbot chatbot = Chatbot(args) dlg_history = [] dlg_claims = set() make_parent_directories(args.output_file) content_list = [] metadata_list = [] example_id = 0 turn_num = 0 dlg_topic = "" with open(args.input_file, "r") as f: for line in tqdm(f.readlines(), desc="Lines"): line_split = line.split("): ") if line_split[0].startswith("User"): cur_dlg_turn = DialogueTurn(user_utterance=line_split[1].strip()) elif line_split[0].startswith("Chatbot"): turn_num += 1 cur_dlg_turn.agent_utterance = line_split[1].strip() claims = chatbot.claim_splitter.split_claim( dialog_history=dlg_history, new_user_utterance=cur_dlg_turn.user_utterance, 
current_agent_utterance=cur_dlg_turn.agent_utterance, system_parameters={"engine": "gpt-4"}, dialog_topic=dlg_topic, ) # print(claims) dlg_history.append(cur_dlg_turn) ret_output = chatbot._retrieve_evidences(claims, top_p=0.7) for claim_idx, evidences in ret_output.items(): claim_idx = int(claim_idx) if claims[claim_idx][0] in dlg_claims: # print("Skipping duplicate claim") continue claim = claims[claim_idx][0] evidence_texts = [e[2] for e in evidences] evidence_titles = [e[0] for e in evidences] evidence_texts, evidence_titles = highlight_keywords_from_claim( claim, evidence_texts, evidence_titles ) turn_params = { "user_utterance": cur_dlg_turn.user_utterance, "dialog_history": dlg_history, "claim": claim, "evidence_titles": evidence_titles, "evidence_texts": evidence_texts, } content, _ = _fill_template(args.scale_template_file, turn_params) # print(content) # exit() content_list.append(content) metadata_list.append( json.dumps( { "pipeline": simulation_pipeline, "subset": args.subset, "engine": args.engine, # "atlas" "id": str(example_id), "turn_num": str(turn_num), "agent_utterance": cur_dlg_turn.agent_utterance, } ) ) example_id += 1 for c in claims: dlg_claims.add(c[0]) elif line.startswith("====="): turn_num = 0 dlg_history = [] dlg_claims = set() elif line.startswith("Topic:"): dlg_topic = line[7:].strip() # print("dialog topic = ", dlg_topic) else: raise ValueError("ERROR: Unknown line type %s" % line) df = pd.DataFrame({"text": content_list, "metadata": metadata_list}) df.to_csv(args.output_file) if __name__ == "__main__": parser = argparse.ArgumentParser() add_pipeline_arguments(parser) parser.add_argument( "--input_file", type=str, required=True, help="Where to read the partial conversations from, with the last line of each conversation being the model response.", ) parser.add_argument( "--output_file", type=str, required=True, help="Where to write the outputs." 
) parser.add_argument( "--scale_template_file", type=str, default="benchmark/prompts/scale_factuality.prompt", help="prompt file to generate input data file for scale ai human evaluation.", ) parser.add_argument( "--subset", type=str, required=True, help="The subset of the benchamrk.", ) args = parser.parse_args()
check_pipeline_arguments(args)
4
2023-10-19 18:17:25+00:00
16k
jhejna/cpl
research/algs/off_policy_algorithm.py
[ { "identifier": "ReplayBuffer", "path": "research/datasets/replay_buffer/buffer.py", "snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functi...
import datetime import functools import os import sys import tempfile import gym import numpy as np import torch from abc import abstractmethod from typing import Any, Dict, Optional, Union from research.datasets import ReplayBuffer from research.datasets.replay_buffer import storage from research.envs.base import EmptyEnv from research.networks.base import ModuleContainer from research.utils import runners, utils from .base import Algorithm from research.utils.config import Config
11,234
self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. 
self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. 
self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. # As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high)
class OffPolicyAlgorithm(Algorithm): def __init__( self, *args, offline_steps: int = 0, # Run fully offline by setting to -1 random_steps: int = 1000, async_runner_ep_lag: int = 1, **kwargs, ): super().__init__(*args, **kwargs) self.offline_steps = offline_steps self.random_steps = random_steps self.async_runner_ep_lag = async_runner_ep_lag def setup_datasets(self, env: gym.Env, total_steps: int): super().setup_datasets(env, total_steps) # Assign the correct update function based on what is passed in. if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: self.env_step = self._empty_step elif isinstance(env, runners.AsyncEnv): self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 self._resetting = True env.reset_send() # Ask the env to start resetting. self.env_step = self._async_env_step elif isinstance(env, runners.MPRunner): assert isinstance(self.dataset, ReplayBuffer), "must use replaybuffer for MP RUnner." assert self.dataset.distributed, "ReplayBuffer must be distributed for use with Fully MPRunner." # Launch the runner subprocess. self._eps_since_last_checkpoint = 0 self._checkpoint_dir = tempfile.mkdtemp(prefix="checkpoints_") assert self.offline_steps <= 0, "MPRunner does not currently support offline to online." env.start( fn=_off_policy_collector_subprocess, checkpoint_path=self._checkpoint_dir, storage_path=self.dataset.storage_path, random_steps=self.random_steps, exclude_keys=self.dataset.exclude_keys, total_steps=total_steps, ) self.env_step = self._runner_env_step elif isinstance(env, gym.Env): # Setup Env Metrics self._current_obs = env.reset() self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. 
self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! 
if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. 
# As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high)
return utils.to_device(utils.to_tensor(action_range), self.device)
5
2023-10-19 17:25:45+00:00
16k
nbasyl/LLM-FP4
configs/FPQ_config_llama.py
[ { "identifier": "FPPTQSLBatchingQuantLinear_fpq", "path": "quant_layers/fp_linear.py", "snippet": "class FPPTQSLBatchingQuantLinear_fpq(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw\",\n w_bit...
from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline
11,265
bit = 8 exp_bit = 4 embed_name_list = ["qembedding"] fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"] matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"] w_bit = {name: bit for name in fc_name_list} a_bit = {name: bit for name in fc_name_list} embed_bit = {name: bit for name in embed_name_list} A_bit = {name: bit for name in matmul_name_list} B_bit = {name: bit for name in matmul_name_list} w_exp_bit = {name: exp_bit for name in fc_name_list} a_exp_bit = {name: exp_bit for name in fc_name_list} embed_exp_bit = {name: exp_bit for name in embed_name_list} A_exp_bit = {name: exp_bit for name in matmul_name_list} B_exp_bit = {name: exp_bit for name in matmul_name_list} ptqsl_embedding_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1 } ptqsl_linear_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1, "n_a": 1, "bias_correction":True # Conventionally I'll not add an actual bias correction in linear } def get_module(module_type, *args, **kwargs): if "embedding" in module_type: kwargs.update(ptqsl_embedding_kwargs) module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0) elif "qlinear" in module_type: kwargs.update(ptqsl_linear_kwargs) if module_type == "qlinear_score": kwargs["n_V"] = 1
bit = 8 exp_bit = 4 embed_name_list = ["qembedding"] fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"] matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"] w_bit = {name: bit for name in fc_name_list} a_bit = {name: bit for name in fc_name_list} embed_bit = {name: bit for name in embed_name_list} A_bit = {name: bit for name in matmul_name_list} B_bit = {name: bit for name in matmul_name_list} w_exp_bit = {name: exp_bit for name in fc_name_list} a_exp_bit = {name: exp_bit for name in fc_name_list} embed_exp_bit = {name: exp_bit for name in embed_name_list} A_exp_bit = {name: exp_bit for name in matmul_name_list} B_exp_bit = {name: exp_bit for name in matmul_name_list} ptqsl_embedding_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1 } ptqsl_linear_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1, "n_a": 1, "bias_correction":True # Conventionally I'll not add an actual bias correction in linear } def get_module(module_type, *args, **kwargs): if "embedding" in module_type: kwargs.update(ptqsl_embedding_kwargs) module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0) elif "qlinear" in module_type: kwargs.update(ptqsl_linear_kwargs) if module_type == "qlinear_score": kwargs["n_V"] = 1
module= FPPTQSLBatchingQuantLinear_fpq(*args,**kwargs,w_bit=w_bit[module_type],a_bit=a_bit[module_type],w_exponent_bit=w_exp_bit[module_type],a_exponent_bit=a_exp_bit[module_type])
0
2023-10-15 06:05:13+00:00
16k
bcmi/libcom
libcom/shadow_generation/source/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities import rank_zero_only from omegaconf import ListConfig from libcom.shadow_generation.source.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from libcom.shadow_generation.source.ldm.modules.ema import LitEma from libcom.shadow_generation.source.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from libcom.shadow_generation.source.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from libcom.shadow_generation.source.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from libcom.shadow_generation.source.ldm.models.diffusion.ddim import DDIMSampler from libcom.shadow_generation.source.sampler.pndm import PNDMSampler from libcom.shadow_generation.source.ldm.models.mask_predictor.mask_predictor import latent_guidance_predictor
13,913
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, mode, ddim_steps, input, add_noise_strength, **kwargs): if mode == 'ddim': ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) elif mode == 'pndm': pndm_sampler = PNDMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = pndm_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, input=input, strength=add_noise_strength, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, mode='ddim', input=None, add_noise_strength=1, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') 
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim_steps=ddim_steps, eta=ddim_eta, mode=mode, input=input, add_noise_strength=add_noise_strength) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if 
len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mask=None, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif 
self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") if mask is not None: ratio = torch.sum(torch.greater_equal(mask, 0.6), dim=(1,2)) / (mask.shape[1] * mask.shape[2]) scale = torch.clamp((1/ratio).type(torch.int32), 2, 19) mask = mask * scale[:, None, None] + 1 mask = mask[:, None, :, :] loss *= mask return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = 
t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = 
len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): #TODO # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, 
nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], 
self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) 
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out def decode_first_stage_with_grad(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), 
dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c, mask= self.get_input(batch, self.first_stage_key) loss = self(x, c, mask) return loss def forward(self, x, c, mask=None, *args, **kwargs): train_mask_only = kwargs.pop('train_mask_only', False) if train_mask_only: t = torch.randint(0, int(0.3 * self.num_timesteps), (x.shape[0],), device=self.device).long() else: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, mask=mask, train_mask_only=train_mask_only, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return 
(extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, mask=None, noise=None, train_mask_only=False): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False, mask=mask).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) 
loss_dict.update({f'{prefix}/loss': loss}) if train_mask_only: pred_x0 = self.predict_start_from_noise(x_t=x_noisy, t=t, noise=model_output) return loss, loss_dict, pred_x0 return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, 
_, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = 
self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, mode, ddim_steps, input, add_noise_strength, **kwargs): if mode == 'ddim': ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) elif mode == 'pndm': pndm_sampler = PNDMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = pndm_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, input=input, strength=add_noise_strength, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = 
xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, mode='ddim', input=None, add_noise_strength=1, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] 
for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim_steps=ddim_steps, eta=ddim_eta, mode=mode, input=input, add_noise_strength=add_noise_strength) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
12
2023-10-19 05:08:12+00:00
16k
e4s2023/E4S2023
training/coach.py
[ { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor...
from utils import torch_utils from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, FFHQDataset, FFHQ_MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED, FFHQ_MASK_CONVERT_TF_DETAILED from criteria.w_norm import WNormLoss from criteria.id_loss import IDLoss from criteria.face_parsing.face_parsing_loss import FaceParsingLoss from criteria.lpips.lpips import LPIPS from criteria.adv_loss import AdvDLoss,AdvGLoss,DR1Loss,GPathRegularizer from criteria.style_loss import StyleLoss from training.ranger import Ranger from models.networks import Net, Net2, Net3, NetStage2,MultiScaleNet from tensorboardX import SummaryWriter from torch.utils.data import DataLoader from torch import nn from models.stylegan2.model import Generator,Discriminator from collections import OrderedDict from models.encoder_with_optim import EncoderPlusOptimNet import torchvision.transforms as transforms import torch.nn.functional as F import torch import os import matplotlib import matplotlib.pyplot as plt import torch.distributed as dist import math
13,075
# ==== Initialize network ==== self.net = Net3(self.opts) # print(self.device) self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net) self.net = self.net.to(self.device) self.net_ema = Net3(self.opts).to(self.device).eval() torch_utils.accumulate(self.net_ema,self.net, 0) if self.opts.train_D: self.D = Discriminator(self.opts.out_size).to(self.device).eval() if self.opts.dist_train: # Wrap the model self.net = nn.parallel.DistributedDataParallel(self.net, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) if self.opts.train_D: self.D = nn.parallel.DistributedDataParallel(self.D, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) # 加载整个模型预训练好的参数,继续训练 if self.opts.checkpoint_path is not None: ckpt_dict=torch.load(self.opts.checkpoint_path) self.global_step= ckpt_dict["opts"]["max_steps"]+1 if self.opts.dist_train: self.net.module.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(ckpt_dict["state_dict"]) if self.opts.train_D: self.D.module.load_state_dict(ckpt_dict["D_state_dict"]) else: self.net.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["D_state_dict"],prefix="module.")) print("Resume training at step %d..."%self.global_step) # 加载 stage-1 训练好的参数 elif self.opts.stage1_checkpoint_path is not None: stage1_ckpt = torch.load(self.opts.stage1_checkpoint_path) if self.opts.dist_train: self.net.module.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.module.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.module.latent_avg = 
stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg else: self.net.latent_avg = self.net.latent_avg print('Loading stage-1 pretrained weights!') # 加载styleGAN预训练权重 else: styleGAN2_ckpt = torch.load(self.opts.stylegan_weights) if self.opts.dist_train: self.net.module.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.module.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D.module, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.module.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg.repeat(1, 1) else: self.net.module.latent_avg = self.net.module.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) else: self.net.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg.repeat(1, 1) else: self.net.latent_avg = self.net.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) print('Loading pretrained styleGAN2 weights!') # Estimate latent_avg via dense 
sampling if latent_avg is not available if self.opts.dist_train: if self.net.module.latent_avg is None: self.net.module.latent_avg = self.net.module.G.mean_latent(int(1e5))[0].detach() else: if self.net.latent_avg is None: self.net.latent_avg = self.net.G.mean_latent(int(1e5))[0].detach() self.mse_loss = nn.MSELoss().to(self.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval() if self.opts.id_lambda > 0:
matplotlib.use('Agg') # torch.autograd.set_detect_anomaly(True) ACCUM = 0.5 ** (32 / (100 * 1000)) # 0.9977843871238888 class Coach: def __init__(self, opts): self.opts = opts self.global_step = 0 # 分布式训练 if self.opts.dist_train: self.num_gpus = torch.cuda.device_count() self.rank = int(os.environ["RANK"]) self.world_size = int(os.environ["WORLD_SIZE"]) self.local_rank = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(self.rank % self.num_gpus) dist.init_process_group( backend='nccl', world_size=self.world_size, rank=self.rank, ) self.device = torch.device("cuda", self.local_rank) else: self.rank=0 # dummy rank self.device = torch.device("cuda", 0) self.opts.device=self.device # ==== Initialize network ==== self.net = Net3(self.opts) # print(self.device) self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net) self.net = self.net.to(self.device) self.net_ema = Net3(self.opts).to(self.device).eval() torch_utils.accumulate(self.net_ema,self.net, 0) if self.opts.train_D: self.D = Discriminator(self.opts.out_size).to(self.device).eval() if self.opts.dist_train: # Wrap the model self.net = nn.parallel.DistributedDataParallel(self.net, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) if self.opts.train_D: self.D = nn.parallel.DistributedDataParallel(self.D, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) # 加载整个模型预训练好的参数,继续训练 if self.opts.checkpoint_path is not None: ckpt_dict=torch.load(self.opts.checkpoint_path) self.global_step= ckpt_dict["opts"]["max_steps"]+1 if self.opts.dist_train: self.net.module.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(ckpt_dict["state_dict"]) if self.opts.train_D: self.D.module.load_state_dict(ckpt_dict["D_state_dict"]) else: self.net.latent_avg = ckpt_dict['latent_avg'].to(self.device) 
self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["D_state_dict"],prefix="module.")) print("Resume training at step %d..."%self.global_step) # 加载 stage-1 训练好的参数 elif self.opts.stage1_checkpoint_path is not None: stage1_ckpt = torch.load(self.opts.stage1_checkpoint_path) if self.opts.dist_train: self.net.module.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.module.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.module.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg else: self.net.latent_avg = self.net.latent_avg print('Loading stage-1 pretrained weights!') # 加载styleGAN预训练权重 else: styleGAN2_ckpt = torch.load(self.opts.stylegan_weights) if self.opts.dist_train: self.net.module.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.module.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D.module, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.module.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg.repeat(1, 1) else: 
self.net.module.latent_avg = self.net.module.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) else: self.net.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg.repeat(1, 1) else: self.net.latent_avg = self.net.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) print('Loading pretrained styleGAN2 weights!') # Estimate latent_avg via dense sampling if latent_avg is not available if self.opts.dist_train: if self.net.module.latent_avg is None: self.net.module.latent_avg = self.net.module.G.mean_latent(int(1e5))[0].detach() else: if self.net.latent_avg is None: self.net.latent_avg = self.net.G.mean_latent(int(1e5))[0].detach() self.mse_loss = nn.MSELoss().to(self.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval() if self.opts.id_lambda > 0:
self.id_loss = IDLoss(self.opts).to(self.device).eval()
11
2023-10-15 12:15:01+00:00
16k
sotopia-lab/sotopia
examples/experiment_eval.py
[ { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ...
import asyncio import logging import os import subprocess import sys import gin from datetime import datetime from logging import FileHandler from typing import Any, Callable, Generator, Literal, Sequence, cast from absl import app, flags from rich import print from rich.logging import RichHandler from tqdm import tqdm from sotopia.agents import LLMAgent from sotopia.database import ( AgentProfile, EnvAgentComboStorage, EnvironmentProfile, EpisodeLog, ) from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, ) from sotopia.envs.parallel import ParallelSotopiaEnv from sotopia.generation_utils.generate import LLM_Name from sotopia.messages import AgentAction, Message, Observation from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, ) from sotopia.server import run_async_server from sotopia_conf.gin_utils import parse_gin_flags, run
12,663
_DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] FLAGS = flags.FLAGS # date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) env_ids: list[str] = list(EnvironmentProfile.all_pks()) assert all( isinstance(env_id, str) for env_id in env_ids ), "env_ids should be a list of strings" def check_existing_episodes( env_id: str, agent_ids: list[str], models: dict[str, LLM_Name], tag: str | None = None, ) -> bool: if tag: existing_episode = EpisodeLog.find( (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag) ).all() else: existing_episode = EpisodeLog.find( EpisodeLog.environment == env_id ).all() if existing_episode: for episode in existing_episode: assert isinstance( episode, EpisodeLog ), "episode should be an EpisodeLog" if episode.agents == agent_ids and episode.models == list( models.values() ): return True return False else: return False def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None: sampler = ConstraintBasedSampler[Observation, AgentAction]( env_candidates=[env_id] ) env_agent_combo_list = list( sampler.sample(agent_classes=[LLMAgent] * 2, replacement=False) ) for env, agent in env_agent_combo_list: EnvAgentComboStorage( env_id=env.profile.pk, agent_ids=[agent[0].profile.pk, agent[1].profile.pk], ).save() @gin.configurable def _iterate_env_agent_combo_not_in_db( model_names: dict[str, LLM_Name], env_ids: list[str] = [], tag: str | None = None,
_DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] FLAGS = flags.FLAGS # date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) env_ids: list[str] = list(EnvironmentProfile.all_pks()) assert all( isinstance(env_id, str) for env_id in env_ids ), "env_ids should be a list of strings" def check_existing_episodes( env_id: str, agent_ids: list[str], models: dict[str, LLM_Name], tag: str | None = None, ) -> bool: if tag: existing_episode = EpisodeLog.find( (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag) ).all() else: existing_episode = EpisodeLog.find( EpisodeLog.environment == env_id ).all() if existing_episode: for episode in existing_episode: assert isinstance( episode, EpisodeLog ), "episode should be an EpisodeLog" if episode.agents == agent_ids and episode.models == list( models.values() ): return True return False else: return False def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None: sampler = ConstraintBasedSampler[Observation, AgentAction]( env_candidates=[env_id] ) env_agent_combo_list = list( sampler.sample(agent_classes=[LLMAgent] * 2, replacement=False) ) for env, agent in env_agent_combo_list: EnvAgentComboStorage( env_id=env.profile.pk, agent_ids=[agent[0].profile.pk, agent[1].profile.pk], ).save() @gin.configurable def _iterate_env_agent_combo_not_in_db( model_names: dict[str, LLM_Name], env_ids: list[str] = [], tag: str | None = None,
) -> Generator[EnvAgentCombo[Observation, AgentAction], None, None]:
12
2023-10-23 19:47:26+00:00
16k
uukuguy/multi_loras
multi_loras/slora/router/manager.py
[ { "identifier": "SamplingParams", "path": "multi_loras/slora/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n ...
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq, BatchTokenIdOut, AbortReq from .input_params import InputParams from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from .stats import Stats from .profiler import AlphaModel, BetaModel from .pets_req_queue import PETSReqQueue from .peft_req_queue import PEFTReqQueue from .cluster_req_queue import ClusterReqQueue from .abort_req_queue import AbortReqQueue from ..models.peft.lora_adapter import get_lora_config
12,422
if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: self.req_queue = AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: self.req_queue = ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def 
profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ 事件处理循环 """ # 删除所有已经 finished 的 req if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: self.req_queue = AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: self.req_queue = ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = 
model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if 
counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ 事件处理循环 """ # 删除所有已经 finished 的 req if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0:
self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list))
3
2023-10-16 02:39:47+00:00
16k
MobileLLM/AutoDroid
droidbot/input_manager.py
[ { "identifier": "EventLog", "path": "droidbot/input_event.py", "snippet": "class EventLog(object):\n \"\"\"\n save an event to local file system\n \"\"\"\n\n def __init__(self, device, app, event, profiling_method=None, tag=None):\n self.device = device\n self.app = app\n ...
import json import logging import subprocess import time from .input_event import EventLog from .input_policy import UtgBasedInputPolicy, UtgNaiveSearchPolicy, UtgGreedySearchPolicy, \ UtgReplayPolicy, \ ManualPolicy, TaskPolicy, \ POLICY_NAIVE_DFS, POLICY_GREEDY_DFS, \ POLICY_NAIVE_BFS, POLICY_GREEDY_BFS, \ POLICY_REPLAY, POLICY_MEMORY_GUIDED, \ POLICY_MANUAL, POLICY_MONKEY, POLICY_NONE, POLICY_TASK from .input_script import DroidBotScript from .input_policy2 import MemoryGuidedPolicy
13,575
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master): if self.policy_name == POLICY_NONE: input_policy = None elif self.policy_name == POLICY_MONKEY: input_policy = None elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]: input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]: input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name == POLICY_MEMORY_GUIDED: input_policy = MemoryGuidedPolicy(device, app, self.random_input) elif self.policy_name == POLICY_REPLAY: input_policy = UtgReplayPolicy(device, app, self.replay_output) elif self.policy_name == POLICY_MANUAL: input_policy = ManualPolicy(device, app)
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master): if self.policy_name == POLICY_NONE: input_policy = None elif self.policy_name == POLICY_MONKEY: input_policy = None elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]: input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]: input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name == POLICY_MEMORY_GUIDED: input_policy = MemoryGuidedPolicy(device, app, self.random_input) elif self.policy_name == POLICY_REPLAY: input_policy = UtgReplayPolicy(device, app, self.replay_output) elif self.policy_name == POLICY_MANUAL: input_policy = ManualPolicy(device, app)
elif self.policy_name == POLICY_TASK:
16
2023-10-23 03:32:58+00:00
16k
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray,...
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
13,196
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs)
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs)
interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq)
2
2023-10-18 13:12:20+00:00
16k
apple/ml-nvas3d
demo/generate_demo_video.py
[ { "identifier": "convolve_moving_receiver", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution betw...
import os import json import argparse import itertools import subprocess import typing as T import torch import imageio import torchaudio import numpy as np import matplotlib.pyplot as plt from moviepy.editor import * from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp from nvas3d.utils.audio_utils import clip_two, clip_all from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene
10,957
""" # Set source and receiver points source_point_list = grid_points_source[source_idx_list] receiver_point_list = grid_points_receiver[receiver_idx_list] source_points_pair, receiver_points_pair = all_pairs(source_point_list, receiver_point_list) _, receiver_rotation_pair = all_pairs(source_point_list, receiver_rotation_list) room_list = [room] * len(source_points_pair) filename_list = None # Render RIR for grid points ir_list = render_rir_parallel(room_list, source_points_pair, receiver_points_pair, receiver_rotation_list=receiver_rotation_pair, filename_list=filename_list, channel_type=channel_type, channel_order=channel_order) ir_list = clip_all(ir_list) # make the length consistent num_channel = len(ir_list[0]) # Reshape RIR num_sources = len(source_idx_list) num_receivers = len(receiver_idx_list) ir_output = torch.stack(ir_list).reshape(num_sources, num_receivers, num_channel, -1) # '-1' will infer the remaining dimension based on the size of each tensor in ir_list ir_output /= ir_output.abs().max() return ir_output def interpolate_values( start: float, end: float, interp_weight: float ) -> float: """ Interpolate between two values based on the weight values. Args: - start: Beginning value. - end: Ending value. - interp_weight: Weight for linear interpolation Returns: - Interpolated value. """ return (1 - interp_weight) * start + interp_weight * end def main(args): """ Generate NVAS video from the estimated dry sound. Save: ├── {results_demo} = results/nvas3d_demo/default/demo/{room}/0 │ ├── video/ │ │ ├── moving_audio.wav : Audio interpolated for the moving receiver. │ │ ├── moving_audio_1.wav : Audio interpolated specifically for source 1. │ │ ├── moving_audio_2.wav : Audio interpolated specifically for source 2. │ │ ├── moving_video.mp4 : Video visualization of movement (no audio). │ │ ├── nvas.mp4 : NVAS video results with combined audio. │ │ ├── nvas_source1.mp4 : NVAS video results for only source 1 audio. 
│ │ ├── nvas_source2.mp4 : NVAS video results for only source 2 audio. │ │ └── rgb_receiver.png : A rendered view from the perspective of the receiver. """ # Constants sample_rate = args.sample_rate sample_rate_video = args.sample_rate_video novel_path_config = args.novel_path_config use_gt_location = args.use_gt_location channel_type = args.channel_type use_placeholder_mesh = args.use_placeholder_mesh # Load data and metadata metadata = torch.load(f'{args.results_dir}/results_detection/metadata.pt') room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] if use_gt_location: # Use estimated dry sound from GT source location source1_idx = metadata['source1_idx'][0].item() source2_idx = metadata['source2_idx'][0].item() source_idx_list = [source1_idx, source2_idx] else: # Use estimated dry sound from detected source location detected_source1_idx = metadata['detected_source_idx'][0] detected_source2_idx = metadata['detected_source_idx'][1] source_idx_list = [detected_source1_idx, detected_source2_idx] # Define receiver path and rotations with open(f'demo/config_demo/{novel_path_config}.json', 'r') as file: json_path = json.load(file) receiver_idx_list = json_path['receiver_idx_list'] receiver_rotation_list = json_path['receiver_rotation_list'] # Load grid points grid_points_receiver = load_room_grid(room, grid_distance=args.grid_distance)['grid_points'] # Generate RIRs output_dir = f'{args.results_dir}/video_{channel_type}' os.makedirs(output_dir, exist_ok=True) ir_save_dir = f'{output_dir}/ir_save_{novel_path_config}_{channel_type}.pt' if os.path.exists(ir_save_dir): ir_output = torch.load(ir_save_dir) else: ir_output = generate_rir_combination( room, source_idx_list, grid_points_source, receiver_idx_list, receiver_rotation_list, grid_points_receiver, channel_type ) torch.save(ir_output, ir_save_dir) ir1_list, ir2_list = ir_output # Prepare source audio if use_gt_location: 
source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry1_estimated.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry2_estimated.wav') else: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[0]}.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[1]}.wav')
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def normalize(input: torch.Tensor) -> torch.Tensor: output = (input - input.min()) / (input.max() - input.min()) output = 2 * output - 1 return output def configure_scene_from_metadata( metadata: T.Dict[str, T.Any], image_size: T.Tuple[int, int] = (1000, 1000), hfov: float = 90.0, use_placeholder_mesh: bool = False ) -> Scene: """ Configures a scene using the provided metadata. Args: - metadata: Dictionary containing room and grid point information. - image_size: The size of the rendered image. - hfov: Horizontal field of view. - use_placeholder_mesh: Flag to determine if placeholder meshes should be used. Returns: - Configured scene object. """ room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] scene = create_scene(room, image_size=image_size, hfov=hfov) if use_placeholder_mesh: # Add placeholder mesh for sources and receivers to the scene # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb: # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/) # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). 
# - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/) ss_source1 = Source( position=grid_points_source[source_idx_list[0]], rotation=0, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_source2 = Source( position=grid_points_source[source_idx_list[1]], rotation=-90, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_mic_list = [ Source( position=grid_points_source[idx], rotation=180, dry_sound='', mesh='classic_microphone', # Need mesh object device=torch.device('cpu') ) for idx in receiver_idx_list_original ] scene.add_source_mesh = True scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original)) scene.update_source(ss_source1, 0) scene.update_source(ss_source2, 1) for m, mic in enumerate(ss_mic_list): scene.update_source(mic, m + 2) return scene def interpolate_moving_audio( source1_audio: torch.Tensor, source2_audio: torch.Tensor, ir1_list: T.List[torch.Tensor], ir2_list: T.List[torch.Tensor], receiver_position: torch.Tensor ) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Interpolates audio for a moving receiver. Args: - source1_audio: First source audio. - source2_audio: Second source audio. - ir1_list: List of impulse responses for source 1. - ir2_list: List of impulse responses for source 2. - receiver_position: Positions of the moving receiver. Returns: - Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2. 
""" # Prepare for interpolation audio_len = source1_audio.shape[-1] interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len) # Generate audio for moving receiver receiver_audio_1 = convolve_moving_receiver(source1_audio.numpy()[0], ir1_list.numpy(), interp_index, interp_weight) receiver_audio_2 = convolve_moving_receiver(source2_audio.numpy()[0], ir2_list.numpy(), interp_index, interp_weight) receiver_audio_1 = receiver_audio_1[..., :source1_audio.shape[-1]] receiver_audio_2 = receiver_audio_2[..., :source1_audio.shape[-1]] # Mix and normalize audios receiver_audio = (receiver_audio_1 + receiver_audio_2) scale = np.max(abs(receiver_audio)) receiver_audio /= scale receiver_audio_1 /= scale receiver_audio_2 /= scale return torch.from_numpy(receiver_audio), torch.from_numpy(receiver_audio_1), torch.from_numpy(receiver_audio_2) def interpolate_rgb_images( scene: Scene, receiver_position: torch.Tensor, receiver_rotation_list: T.List[float], video_len: int ) -> T.List[np.ndarray]: """ Interpolates RGB images based on receiver movement and rotation. Args: - scene: Scene object to render the images from. - receiver_position: Positions of the receiver along the path. - receiver_rotation_list: List of rotations for the receiver. - video_len: Number of frames in the video. Returns: - List of interpolated RGB images. 
""" interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), video_len) interpolated_rgb_list = [] for t in range(len(interp_index)): # Find the positions and rotations between which we're interpolating start_idx = interp_index[t] end_idx = start_idx + 1 start_pos = receiver_position[start_idx] end_pos = receiver_position[end_idx] start_rot = receiver_rotation_list[start_idx] end_rot = receiver_rotation_list[end_idx] # Interpolate position and rotation receiver_position_interp = interpolate_values(start_pos, end_pos, interp_weight[t]) receiver_rotation_interp = interpolate_values(start_rot, end_rot, interp_weight[t]) receiver = Receiver(receiver_position_interp, receiver_rotation_interp) scene.update_receiver(receiver) rgb, _ = scene.render_image() interpolated_rgb_list.append(rgb[..., :3]) return interpolated_rgb_list def all_pairs( list1: T.List[T.Any], list2: T.List[T.Any] ) -> T.Tuple[T.List[T.Any], T.List[T.Any]]: """ Computes all pairs of combinations between two lists. Args: - list1: First list. - list2: Second list. Returns: - Two lists containing paired elements from list1 and list2. """ list_pair = list(itertools.product(list1, list2)) list1_pair, list2_pair = zip(*list_pair) list1_pair = list(list1_pair) list2_pair = list(list2_pair) return list1_pair, list2_pair def generate_rir_combination( room: str, source_idx_list: T.List[int], grid_points_source: torch.Tensor, receiver_idx_list: T.List[int], receiver_rotation_list: T.List[float], grid_points_receiver: torch.Tensor, channel_type: str = 'Binaural', channel_order: int = 0 ) -> T.List[T.List[torch.Tensor]]: """ Generates room impulse responses (RIR) for given source and receiver combinations. Args: - room: Room object for which RIRs need to be computed. - source_idx_list: List of source indices. - grid_points_source: Grid points for the source. - receiver_idx_list: List of receiver indices. - receiver_rotation_list: List of receiver rotations. 
- grid_points_receiver: Grid points for the receiver. - channel_type: Type of the channel. Defaults to 'Ambisonics'. - channel_order: Order of the channel for Ambisonics. Defulats to 0, as video usually does not support HOA. Returns: - A 2D list containing RIRs for every source-receiver combination. """ # Set source and receiver points source_point_list = grid_points_source[source_idx_list] receiver_point_list = grid_points_receiver[receiver_idx_list] source_points_pair, receiver_points_pair = all_pairs(source_point_list, receiver_point_list) _, receiver_rotation_pair = all_pairs(source_point_list, receiver_rotation_list) room_list = [room] * len(source_points_pair) filename_list = None # Render RIR for grid points ir_list = render_rir_parallel(room_list, source_points_pair, receiver_points_pair, receiver_rotation_list=receiver_rotation_pair, filename_list=filename_list, channel_type=channel_type, channel_order=channel_order) ir_list = clip_all(ir_list) # make the length consistent num_channel = len(ir_list[0]) # Reshape RIR num_sources = len(source_idx_list) num_receivers = len(receiver_idx_list) ir_output = torch.stack(ir_list).reshape(num_sources, num_receivers, num_channel, -1) # '-1' will infer the remaining dimension based on the size of each tensor in ir_list ir_output /= ir_output.abs().max() return ir_output def interpolate_values( start: float, end: float, interp_weight: float ) -> float: """ Interpolate between two values based on the weight values. Args: - start: Beginning value. - end: Ending value. - interp_weight: Weight for linear interpolation Returns: - Interpolated value. """ return (1 - interp_weight) * start + interp_weight * end def main(args): """ Generate NVAS video from the estimated dry sound. Save: ├── {results_demo} = results/nvas3d_demo/default/demo/{room}/0 │ ├── video/ │ │ ├── moving_audio.wav : Audio interpolated for the moving receiver. │ │ ├── moving_audio_1.wav : Audio interpolated specifically for source 1. 
│ │ ├── moving_audio_2.wav : Audio interpolated specifically for source 2. │ │ ├── moving_video.mp4 : Video visualization of movement (no audio). │ │ ├── nvas.mp4 : NVAS video results with combined audio. │ │ ├── nvas_source1.mp4 : NVAS video results for only source 1 audio. │ │ ├── nvas_source2.mp4 : NVAS video results for only source 2 audio. │ │ └── rgb_receiver.png : A rendered view from the perspective of the receiver. """ # Constants sample_rate = args.sample_rate sample_rate_video = args.sample_rate_video novel_path_config = args.novel_path_config use_gt_location = args.use_gt_location channel_type = args.channel_type use_placeholder_mesh = args.use_placeholder_mesh # Load data and metadata metadata = torch.load(f'{args.results_dir}/results_detection/metadata.pt') room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] if use_gt_location: # Use estimated dry sound from GT source location source1_idx = metadata['source1_idx'][0].item() source2_idx = metadata['source2_idx'][0].item() source_idx_list = [source1_idx, source2_idx] else: # Use estimated dry sound from detected source location detected_source1_idx = metadata['detected_source_idx'][0] detected_source2_idx = metadata['detected_source_idx'][1] source_idx_list = [detected_source1_idx, detected_source2_idx] # Define receiver path and rotations with open(f'demo/config_demo/{novel_path_config}.json', 'r') as file: json_path = json.load(file) receiver_idx_list = json_path['receiver_idx_list'] receiver_rotation_list = json_path['receiver_rotation_list'] # Load grid points grid_points_receiver = load_room_grid(room, grid_distance=args.grid_distance)['grid_points'] # Generate RIRs output_dir = f'{args.results_dir}/video_{channel_type}' os.makedirs(output_dir, exist_ok=True) ir_save_dir = f'{output_dir}/ir_save_{novel_path_config}_{channel_type}.pt' if os.path.exists(ir_save_dir): ir_output = torch.load(ir_save_dir) 
else: ir_output = generate_rir_combination( room, source_idx_list, grid_points_source, receiver_idx_list, receiver_rotation_list, grid_points_receiver, channel_type ) torch.save(ir_output, ir_save_dir) ir1_list, ir2_list = ir_output # Prepare source audio if use_gt_location: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry1_estimated.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry2_estimated.wav') else: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[0]}.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[1]}.wav')
source1_audio, source2_audio = clip_two(source1_audio, source2_audio)
2
2023-10-19 05:35:54+00:00
16k
openvpi/SingingVocoders
training/nsf_HiFigan_chroma_task.py
[ { "identifier": "Generator", "path": "models/nsf_HiFigan_chroma/models.py", "snippet": "class Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(...
import logging import os import pathlib import random import sys import lightning.pytorch as pl import matplotlib import numpy as np import torch.utils.data import utils from typing import Dict from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only from matplotlib import pyplot as plt from torch import nn from torch.utils.data import Dataset from torchmetrics import Metric, MeanMetric from models.nsf_HiFigan_chroma.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator from modules.loss.HiFiloss import HiFiloss from training.base_task_gan import GanBaseTask from utils.training_utils import ( DsBatchSampler, DsEvalBatchSampler, get_latest_checkpoint_path ) from utils.wav2mel import PitchAdjustableMelSpectrogram
11,452
start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if 
hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class nsf_HiFigan_chroma(GanBaseTask): def __init__(self, config): super().__init__(config) self.TF = PitchAdjustableMelSpectrogram( f_min=0, f_max=None, n_mels=256,) self.logged_gt_wav = set() self.stft=stftlog() def build_dataset(self): self.train_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'train_set_name']) self.valid_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'valid_set_name'], infer=True) def build_model(self): cfg=self.config['model_args'] cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']}) h=AttrDict(cfg) self.generator=Generator(h)
# from utils.indexed_datasets import IndexedDataset def spec_to_figure(spec, vmin=None, vmax=None): if isinstance(spec, torch.Tensor): spec = spec.cpu().numpy() fig = plt.figure(figsize=(12, 9),dpi=100) plt.pcolor(spec.T, vmin=vmin, vmax=vmax) plt.tight_layout() return fig class nsf_HiFigan_dataset(Dataset): def __init__(self, config: dict, data_dir, infer=False): super().__init__() self.config = config self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir) with open(self.data_dir, 'r', encoding='utf8') as f: fills = f.read().strip().split('\n') self.data_index = fills self.infer = infer self.volume_aug = self.config['volume_aug'] self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0 def __getitem__(self, index): data_path = self.data_index[index] data = np.load(data_path) return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio']} def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. 
if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft 
self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class nsf_HiFigan_chroma(GanBaseTask): def __init__(self, config): super().__init__(config) self.TF = PitchAdjustableMelSpectrogram( f_min=0, f_max=None, n_mels=256,) self.logged_gt_wav = set() self.stft=stftlog() def build_dataset(self): self.train_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'train_set_name']) self.valid_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'valid_set_name'], infer=True) def build_model(self): cfg=self.config['model_args'] cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']}) h=AttrDict(cfg) self.generator=Generator(h)
self.discriminator=nn.ModuleDict({'msd':MultiScaleDiscriminator(), 'mpd':MultiPeriodDiscriminator(periods=cfg['discriminator_periods'])})
2
2023-10-17 13:45:09+00:00
16k
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.u...
import os import shutil import tempfile import math import dill import torch import torch.distributed as dist from datetime import datetime, timedelta from typing import Iterable, Union from gec.data import Dataset from gec.fn import map_token_ids from supar.parser import Parser from supar.utils import Config from supar.utils.common import MIN, NUL, UNK from supar.utils.field import RawField from supar.utils.fn import set_rng_state from supar.utils.logging import get_logger, init_logger, progress_bar from supar.utils.metric import Metric from supar.utils.optim import PolynomialLR from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist from supar.utils.parallel import is_master from supar.utils.tokenizer import TransformerTokenizer from supar.utils.transform import AttachJuxtaposeTree, Batch from torch.cuda.amp import GradScaler from torch.optim import AdamW from torch.optim.lr_scheduler import ExponentialLR from torch.nn.functional import embedding from .metric import PerplexityMetric, SpanMetric from .model import Seq2SeqDetectModel, Seq2SeqModel from .transform import Field, Text, Tree from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from transformers import AutoTokenizer, GPT2LMHeadModel
14,108
self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = 
[(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart)
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq' MODEL = Seq2SeqModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: args.bin = os.path.join(os.path.dirname(args.path), 'bin') train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") self.optimizer = AdamW(self.model.parameters(), args.lr, (args.mu, args.nu), args.eps, args.weight_decay) steps = len(train.loader) * epochs // args.update_steps self.scheduler = PolynomialLR(self.optimizer, warmup_steps=self.args.warmup_steps, steps=steps) self.scaler = GradScaler(enabled=args.amp) if dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, 
self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > 
self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = [(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in 
zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart)
SRC = Field('src',
6
2023-10-18 10:55:33+00:00
16k
jianlanluo/SAQ
vqn/conservative_sac_main.py
[ { "identifier": "VQN", "path": "vqn/vqn.py", "snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantiza...
import os import time import uuid import numpy as np import pprint import jax import jax.numpy as jnp import flax import gym import d4rl import absl.app import absl.flags from copy import deepcopy from .vqn import VQN from .conservative_sac import ConservativeSAC from .replay_buffer import get_d4rl_dataset, subsample_batch from .jax_utils import batch_to_jax from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy from .sampler import StepSampler, TrajSampler from .robomimic_utils import ( SequenceDataset, make_dataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env, ENV_TO_HORIZON_MAP, OBS_KEYS ) from .utils import ( Timer, define_flags_with_default, set_random_seed, print_flags, get_user_flags, prefix_metrics, WandBLogger ) from viskit.logging import logger, setup_logger
11,006
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = 
TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch}
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = 
TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch}
with Timer() as train_timer:
11
2023-10-18 06:31:20+00:00
16k
SLDGroup/G-CASCADE
lib/networks.py
[ { "identifier": "pvt_v2_b2", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b2(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b2, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n ...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import timm import logging from scipy import ndimage from lib.pvtv2 import pvt_v2_b2, pvt_v2_b5, pvt_v2_b0 from lib.decoders import CUP, CASCADE, CASCADE_Cat, GCUP, GCUP_Cat, GCASCADE, GCASCADE_Cat from lib.pyramid_vig import pvig_ti_224_gelu, pvig_s_224_gelu, pvig_m_224_gelu, pvig_b_224_gelu from lib.maxxvit_4out import maxvit_tiny_rw_224 as maxvit_tiny_rw_224_4out from lib.maxxvit_4out import maxvit_rmlp_tiny_rw_256 as maxvit_rmlp_tiny_rw_256_4out from lib.maxxvit_4out import maxxvit_rmlp_small_rw_256 as maxxvit_rmlp_small_rw_256_4out from lib.maxxvit_4out import maxvit_rmlp_small_rw_224 as maxvit_rmlp_small_rw_224_4out
13,376
# Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder 
= GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 
3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]
logger = logging.getLogger(__name__) def np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class PVT_CUP(nn.Module): def __init__(self, n_class=1): super(PVT_CUP, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CUP(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CUP decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( 
nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CASCADE(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE_Cat(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE_Cat, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in 
model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) print('Model %s created, param count: %d' % ('PVT backbone: ', sum([m.numel() for m in self.backbone.parameters()]))) # decoder initialization self.decoder = CASCADE_Cat(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE_Cat decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCUP(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCUP, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) 
self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCUP_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. Continuing with the default additive aggregation.') self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCUP_decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', 
skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]
self.backbone2 = maxvit_rmlp_small_rw_224_4out() # [64, 128, 320, 512]
9
2023-10-24 17:49:10+00:00
16k
boppreh/hello_tls
src/hello_tls/scan.py
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _ma...
from enum import Enum from multiprocessing.pool import ThreadPool from typing import Iterable, Union, List, Optional, Iterator, Callable, Any from urllib.parse import urlparse from datetime import datetime, timezone from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod from OpenSSL import SSL, crypto import socket import re import dataclasses import ssl, select
14,182
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. 
try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. 
try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e
def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello:
0
2023-10-21 02:00:13+00:00
16k
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/vit/sam/modules/mask_generator.py
[ { "identifier": "MaskData", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kw...
from typing import Any, Dict, List, Optional, Tuple from torchvision.ops.boxes import batched_nms, box_area # type: ignore from ..amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points) from .prompt_predictor import PromptPredictor from .sam import Sam from pycocotools import mask as mask_utils # type: ignore # noqa: F401 import numpy as np import torch import cv2 # type: ignore # noqa: F401
11,323
return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data['masks'] = 
uncrop_masks(data['masks'], crop_box, orig_h, orig_w)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = 'binary_mask', ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int, None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. 
crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray), None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != (point_grids is None), \ 'Exactly one of points_per_side or point_grid must be provided.' if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in {'binary_mask', 'uncompressed_rle', 'coco_rle'}, f'Unknown output_mode {output_mode}.' 
if output_mode == 'coco_rle': if min_mask_region_area > 0: self.predictor = PromptPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode # TODO: Temporary implementation for compatibility def __call__(self, image: np.ndarray, augment=False, visualize=False) -> List[Dict[str, Any]]: return self.generate(image) @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any), np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == 'coco_rle': mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] elif self.output_mode == 'binary_mask': mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] else: mask_data['segmentations'] = mask_data['rles'] # Write mask records curr_anns = [] for idx in range(len(mask_data['segmentations'])): ann = { 'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: 
# Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability 
score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data['masks'] = uncrop_masks(data['masks'], crop_box, orig_h, orig_w)
data['rles'] = mask_to_rle_pytorch(data['masks'])
10
2023-10-24 00:45:55+00:00
16k
bytedance/ColTrack
models/dino/dino.py
[ { "identifier": "box_ops", "path": "util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(m...
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from util import box_ops from util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid, scale_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
11,745
# for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs # import ipdb; ipdb.set_trace() if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = scale_sigmoid(layer_enc_outputs_coord_unsig.sigmoid()) layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1]) # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1] # enc_outputs_coord = enc_outputs_unsig.sigmoid() # enc_outputs_class = self.enc_class_embed(hs_enc[:-1]) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. 
The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - 
accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. 
For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) 
self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] # self.replace_sa_with_double_ca = replace_sa_with_double_ca if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): raise NotImplementedError def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0]+=self.label_enc.weight[0,0]*0.0 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = scale_sigmoid(layer_outputs_unsig.sigmoid()) outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) # outputs_class = self.class_embed(hs) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: 
outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs # import ipdb; ipdb.set_trace() if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = scale_sigmoid(layer_enc_outputs_coord_unsig.sigmoid()) layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1]) # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1] # enc_outputs_coord = enc_outputs_unsig.sigmoid() # enc_outputs_class = self.enc_class_embed(hs_enc[:-1]) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. 
return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, 
alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
0
2023-10-16 02:18:33+00:00
16k
YuroFR/freqtrade-modded-crypto-trading-bot
tests/strategy/test_strategy_helpers.py
[ { "identifier": "DataProvider", "path": "freqtrade/data/dataprovider.py", "snippet": "class DataProvider:\n\n def __init__(\n self,\n config: Config,\n exchange: Optional[Exchange],\n pairlists=None,\n rpc: Optional[RPCManager] = None\n ) -> None:\n self._...
import numpy as np import pandas as pd import pytest from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import CandleType from freqtrade.resolvers.strategy_resolver import StrategyResolver from freqtrade.strategy import merge_informative_pair, stoploss_from_absolute, stoploss_from_open from tests.conftest import generate_test_data, get_patched_exchange
13,539
for open_range in open_price_ranges: for open_price in np.linspace(*open_range): for desired_stop in np.linspace(-0.50, 0.50, 30): if side == 'long': # -1 is not a valid current_profit, should return 1 assert stoploss_from_open(desired_stop, -1) == 1 else: # 1 is not a valid current_profit for shorts, should return 1 assert stoploss_from_open(desired_stop, 1, True) == 1 for current_profit in np.linspace(*profitrange): if side == 'long': current_price = open_price * (1 + current_profit) expected_stop_price = open_price * (1 + desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit) stop_price = current_price * (1 - stoploss) else: current_price = open_price * (1 - current_profit) expected_stop_price = open_price * (1 - desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit, True) stop_price = current_price * (1 + stoploss) assert stoploss >= 0 # Technically the formula can yield values greater than 1 for shorts # eventhough it doesn't make sense because the position would be liquidated if side == 'long': assert stoploss <= 1 # there is no correct answer if the expected stop price is above # the current price if ((side == 'long' and expected_stop_price > current_price) or (side == 'short' and expected_stop_price < current_price)): assert stoploss == 0 else: assert pytest.approx(stop_price) == expected_stop_price @pytest.mark.parametrize("side,rel_stop,curr_profit,leverage,expected", [ # profit range for long is [-1, inf] while for shorts is [-inf, 1] ("long", 0, -1, 1, 1), ("long", 0, 0.1, 1, 0.09090909), ("long", -0.1, 0.1, 1, 0.18181818), ("long", 0.1, 0.2, 1, 0.08333333), ("long", 0.1, 0.5, 1, 0.266666666), ("long", 0.1, 5, 1, 0.816666666), # 500% profit, set stoploss to 10% above open price ("long", 0, 5, 10, 3.3333333), # 500% profit, set stoploss break even ("long", 0.1, 5, 10, 3.26666666), # 500% profit, set stoploss to 10% above open price ("long", -0.1, 5, 10, 3.3999999), # 500% profit, set stoploss to 10% belowopen price 
("short", 0, 0.1, 1, 0.1111111), ("short", -0.1, 0.1, 1, 0.2222222), ("short", 0.1, 0.2, 1, 0.125), ("short", 0.1, 1, 1, 1), ("short", -0.01, 5, 10, 10.01999999), # 500% profit at 10x ]) def test_stoploss_from_open_leverage(side, rel_stop, curr_profit, leverage, expected): stoploss = stoploss_from_open(rel_stop, curr_profit, side == 'short', leverage) assert pytest.approx(stoploss) == expected open_rate = 100 if stoploss != 1: if side == 'long': current_rate = open_rate * (1 + curr_profit / leverage) stop = current_rate * (1 - stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 + rel_stop / leverage) else: current_rate = open_rate * (1 - curr_profit / leverage) stop = current_rate * (1 + stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 - rel_stop / leverage) def test_stoploss_from_absolute(): assert pytest.approx(stoploss_from_absolute(90, 100)) == 1 - (90 / 100) assert pytest.approx(stoploss_from_absolute(90, 100)) == 0.1 assert pytest.approx(stoploss_from_absolute(95, 100)) == 0.05 assert pytest.approx(stoploss_from_absolute(100, 100)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100)) == 0 assert pytest.approx(stoploss_from_absolute(100, 0)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, False, leverage=5)) == 5 assert pytest.approx(stoploss_from_absolute(90, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100, True)) == -(1 - (110 / 100)) assert pytest.approx(stoploss_from_absolute(110, 100, True)) == 0.1 assert pytest.approx(stoploss_from_absolute(105, 100, True)) == 0.05 assert pytest.approx(stoploss_from_absolute(105, 100, True, 5)) == 0.05 * 5 assert pytest.approx(stoploss_from_absolute(100, 0, True)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True)) == 1 assert 
pytest.approx(stoploss_from_absolute(100, 1, is_short=True, leverage=5)) == 5 @pytest.mark.parametrize('trading_mode', ['futures', 'spot']) def test_informative_decorator(mocker, default_conf_usdt, trading_mode): candle_def = CandleType.get_default(trading_mode) default_conf_usdt['candle_type_def'] = candle_def test_data_5m = generate_test_data('5m', 40) test_data_30m = generate_test_data('30m', 40) test_data_1h = generate_test_data('1h', 40) data = { ('XRP/USDT', '5m', candle_def): test_data_5m, ('XRP/USDT', '30m', candle_def): test_data_30m, ('XRP/USDT', '1h', candle_def): test_data_1h, ('LTC/USDT', '5m', candle_def): test_data_5m, ('LTC/USDT', '30m', candle_def): test_data_30m, ('LTC/USDT', '1h', candle_def): test_data_1h, ('NEO/USDT', '30m', candle_def): test_data_30m, ('NEO/USDT', '5m', CandleType.SPOT): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '15m', candle_def): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '30m', candle_def): test_data_30m, ('ETH/BTC', '1h', CandleType.SPOT): test_data_1h, # Explicitly selected as spot } default_conf_usdt['strategy'] = 'InformativeDecoratorTest' strategy = StrategyResolver.load_strategy(default_conf_usdt) exchange = get_patched_exchange(mocker, default_conf_usdt)
def test_merge_informative_pair(): data = generate_test_data('15m', 40) informative = generate_test_data('1h', 40) result = merge_informative_pair(data, informative, '15m', '1h', ffill=True) assert isinstance(result, pd.DataFrame) assert len(result) == len(data) assert 'date' in result.columns assert result['date'].equals(data['date']) assert 'date_1h' in result.columns assert 'open' in result.columns assert 'open_1h' in result.columns assert result['open'].equals(data['open']) assert 'close' in result.columns assert 'close_1h' in result.columns assert result['close'].equals(data['close']) assert 'volume' in result.columns assert 'volume_1h' in result.columns assert result['volume'].equals(data['volume']) # First 3 rows are empty assert result.iloc[0]['date_1h'] is pd.NaT assert result.iloc[1]['date_1h'] is pd.NaT assert result.iloc[2]['date_1h'] is pd.NaT # Next 4 rows contain the starting date (0:00) assert result.iloc[3]['date_1h'] == result.iloc[0]['date'] assert result.iloc[4]['date_1h'] == result.iloc[0]['date'] assert result.iloc[5]['date_1h'] == result.iloc[0]['date'] assert result.iloc[6]['date_1h'] == result.iloc[0]['date'] # Next 4 rows contain the next Hourly date original date row 4 assert result.iloc[7]['date_1h'] == result.iloc[4]['date'] assert result.iloc[8]['date_1h'] == result.iloc[4]['date'] informative = generate_test_data('1h', 40) result = merge_informative_pair(data, informative, '15m', '1h', ffill=False) # First 3 rows are empty assert result.iloc[0]['date_1h'] is pd.NaT assert result.iloc[1]['date_1h'] is pd.NaT assert result.iloc[2]['date_1h'] is pd.NaT # Next 4 rows contain the starting date (0:00) assert result.iloc[3]['date_1h'] == result.iloc[0]['date'] assert result.iloc[4]['date_1h'] is pd.NaT assert result.iloc[5]['date_1h'] is pd.NaT assert result.iloc[6]['date_1h'] is pd.NaT # Next 4 rows contain the next Hourly date original date row 4 assert result.iloc[7]['date_1h'] == result.iloc[4]['date'] assert result.iloc[8]['date_1h'] is 
pd.NaT def test_merge_informative_pair_same(): data = generate_test_data('15m', 40) informative = generate_test_data('15m', 40) result = merge_informative_pair(data, informative, '15m', '15m', ffill=True) assert isinstance(result, pd.DataFrame) assert len(result) == len(data) assert 'date' in result.columns assert result['date'].equals(data['date']) assert 'date_15m' in result.columns assert 'open' in result.columns assert 'open_15m' in result.columns assert result['open'].equals(data['open']) assert 'close' in result.columns assert 'close_15m' in result.columns assert result['close'].equals(data['close']) assert 'volume' in result.columns assert 'volume_15m' in result.columns assert result['volume'].equals(data['volume']) # Dates match 1:1 assert result['date_15m'].equals(result['date']) def test_merge_informative_pair_lower(): data = generate_test_data('1h', 40) informative = generate_test_data('15m', 40) with pytest.raises(ValueError, match=r"Tried to merge a faster timeframe .*"): merge_informative_pair(data, informative, '1h', '15m', ffill=True) def test_merge_informative_pair_empty(): data = generate_test_data('1h', 40) informative = pd.DataFrame(columns=data.columns) result = merge_informative_pair(data, informative, '1h', '2h', ffill=True) assert result['date'].equals(data['date']) assert list(result.columns) == [ 'date', 'open', 'high', 'low', 'close', 'volume', 'date_2h', 'open_2h', 'high_2h', 'low_2h', 'close_2h', 'volume_2h' ] # We merge an empty dataframe, so all values should be NaN for col in ['date_2h', 'open_2h', 'high_2h', 'low_2h', 'close_2h', 'volume_2h']: assert result[col].isnull().all() def test_merge_informative_pair_suffix(): data = generate_test_data('15m', 20) informative = generate_test_data('1h', 20) result = merge_informative_pair(data, informative, '15m', '1h', append_timeframe=False, suffix="suf") assert 'date' in result.columns assert result['date'].equals(data['date']) assert 'date_suf' in result.columns assert 'open_suf' in 
result.columns assert 'open_1h' not in result.columns assert list(result.columns) == [ 'date', 'open', 'high', 'low', 'close', 'volume', 'date_suf', 'open_suf', 'high_suf', 'low_suf', 'close_suf', 'volume_suf' ] def test_merge_informative_pair_suffix_append_timeframe(): data = generate_test_data('15m', 20) informative = generate_test_data('1h', 20) with pytest.raises(ValueError, match=r"You can not specify `append_timeframe` .*"): merge_informative_pair(data, informative, '15m', '1h', suffix="suf") @pytest.mark.parametrize("side,profitrange", [ # profit range for long is [-1, inf] while for shorts is [-inf, 1] ("long", [-0.99, 2, 30]), ("short", [-2.0, 0.99, 30]), ]) def test_stoploss_from_open(side, profitrange): open_price_ranges = [ [0.01, 1.00, 30], [1, 100, 30], [100, 10000, 30], ] for open_range in open_price_ranges: for open_price in np.linspace(*open_range): for desired_stop in np.linspace(-0.50, 0.50, 30): if side == 'long': # -1 is not a valid current_profit, should return 1 assert stoploss_from_open(desired_stop, -1) == 1 else: # 1 is not a valid current_profit for shorts, should return 1 assert stoploss_from_open(desired_stop, 1, True) == 1 for current_profit in np.linspace(*profitrange): if side == 'long': current_price = open_price * (1 + current_profit) expected_stop_price = open_price * (1 + desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit) stop_price = current_price * (1 - stoploss) else: current_price = open_price * (1 - current_profit) expected_stop_price = open_price * (1 - desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit, True) stop_price = current_price * (1 + stoploss) assert stoploss >= 0 # Technically the formula can yield values greater than 1 for shorts # eventhough it doesn't make sense because the position would be liquidated if side == 'long': assert stoploss <= 1 # there is no correct answer if the expected stop price is above # the current price if ((side == 'long' and 
expected_stop_price > current_price) or (side == 'short' and expected_stop_price < current_price)): assert stoploss == 0 else: assert pytest.approx(stop_price) == expected_stop_price @pytest.mark.parametrize("side,rel_stop,curr_profit,leverage,expected", [ # profit range for long is [-1, inf] while for shorts is [-inf, 1] ("long", 0, -1, 1, 1), ("long", 0, 0.1, 1, 0.09090909), ("long", -0.1, 0.1, 1, 0.18181818), ("long", 0.1, 0.2, 1, 0.08333333), ("long", 0.1, 0.5, 1, 0.266666666), ("long", 0.1, 5, 1, 0.816666666), # 500% profit, set stoploss to 10% above open price ("long", 0, 5, 10, 3.3333333), # 500% profit, set stoploss break even ("long", 0.1, 5, 10, 3.26666666), # 500% profit, set stoploss to 10% above open price ("long", -0.1, 5, 10, 3.3999999), # 500% profit, set stoploss to 10% belowopen price ("short", 0, 0.1, 1, 0.1111111), ("short", -0.1, 0.1, 1, 0.2222222), ("short", 0.1, 0.2, 1, 0.125), ("short", 0.1, 1, 1, 1), ("short", -0.01, 5, 10, 10.01999999), # 500% profit at 10x ]) def test_stoploss_from_open_leverage(side, rel_stop, curr_profit, leverage, expected): stoploss = stoploss_from_open(rel_stop, curr_profit, side == 'short', leverage) assert pytest.approx(stoploss) == expected open_rate = 100 if stoploss != 1: if side == 'long': current_rate = open_rate * (1 + curr_profit / leverage) stop = current_rate * (1 - stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 + rel_stop / leverage) else: current_rate = open_rate * (1 - curr_profit / leverage) stop = current_rate * (1 + stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 - rel_stop / leverage) def test_stoploss_from_absolute(): assert pytest.approx(stoploss_from_absolute(90, 100)) == 1 - (90 / 100) assert pytest.approx(stoploss_from_absolute(90, 100)) == 0.1 assert pytest.approx(stoploss_from_absolute(95, 100)) == 0.05 assert pytest.approx(stoploss_from_absolute(100, 100)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100)) == 0 assert 
pytest.approx(stoploss_from_absolute(100, 0)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, False, leverage=5)) == 5 assert pytest.approx(stoploss_from_absolute(90, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100, True)) == -(1 - (110 / 100)) assert pytest.approx(stoploss_from_absolute(110, 100, True)) == 0.1 assert pytest.approx(stoploss_from_absolute(105, 100, True)) == 0.05 assert pytest.approx(stoploss_from_absolute(105, 100, True, 5)) == 0.05 * 5 assert pytest.approx(stoploss_from_absolute(100, 0, True)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True)) == 1 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True, leverage=5)) == 5 @pytest.mark.parametrize('trading_mode', ['futures', 'spot']) def test_informative_decorator(mocker, default_conf_usdt, trading_mode): candle_def = CandleType.get_default(trading_mode) default_conf_usdt['candle_type_def'] = candle_def test_data_5m = generate_test_data('5m', 40) test_data_30m = generate_test_data('30m', 40) test_data_1h = generate_test_data('1h', 40) data = { ('XRP/USDT', '5m', candle_def): test_data_5m, ('XRP/USDT', '30m', candle_def): test_data_30m, ('XRP/USDT', '1h', candle_def): test_data_1h, ('LTC/USDT', '5m', candle_def): test_data_5m, ('LTC/USDT', '30m', candle_def): test_data_30m, ('LTC/USDT', '1h', candle_def): test_data_1h, ('NEO/USDT', '30m', candle_def): test_data_30m, ('NEO/USDT', '5m', CandleType.SPOT): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '15m', candle_def): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '30m', candle_def): test_data_30m, ('ETH/BTC', '1h', CandleType.SPOT): test_data_1h, # Explicitly selected as 
spot } default_conf_usdt['strategy'] = 'InformativeDecoratorTest' strategy = StrategyResolver.load_strategy(default_conf_usdt) exchange = get_patched_exchange(mocker, default_conf_usdt)
strategy.dp = DataProvider({}, exchange, None)
0
2023-10-21 10:02:05+00:00
16k
generative-skill-chaining/gsc-code
generative_skill_chaining/envs/pybullet/table/predicates.py
[ { "identifier": "primitive_actions", "path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py", "snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANG...
import dataclasses import random import numpy as np import pybullet as p import symbolic from typing import Optional, Dict, List, Sequence, Tuple, Type from ctrlutils import eigen from shapely.geometry import Polygon, LineString from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack from generative_skill_chaining.envs.pybullet.sim import math from generative_skill_chaining.envs.pybullet.sim.robot import Robot
11,521
def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, ) obj_pose = math.Pose.from_eigen(grasp_pose.to_eigen().inverse()) obj_pose.pos += robot.home_pose.pos # Use fake grasp. obj.disable_collisions() obj.set_pose(obj_pose) robot.grasp_object(obj, realistic=False) obj.enable_collisions() # Make sure object isn't touching gripper. obj.unfreeze() p.stepSimulation(physicsClientId=robot.physics_id) if not utils.is_touching(obj, robot): break elif i + 1 == Inhand.MAX_GRASP_ATTEMPTS: dbprint(f"{self}.sample():", False, "- exceeded max grasp attempts") return False dbprint(f"{self}.sample():", True) return True @staticmethod def generate_grasp_pose( obj: Object, handlegrasp: bool = False, upperhandlegrasp: bool = False ) -> math.Pose: """Generates a grasp pose in the object frame of reference.""" # Maximum deviation of the object from the gripper's center y. MAX_GRASP_Y_OFFSET = 0.01 # Gap required between control point and object bottom. FINGER_COLLISION_MARGIN = 0.02 FINGER_WIDTH = 0.022 FINGER_HEIGHT = 0.04 FINGER_DISTANCE = 0.08 THETA_STDDEV = 0.05 if obj.isinstance(Hook): hook: Hook = obj # type: ignore pos_handle, pos_head, pos_joint = Hook.compute_link_positions( head_length=hook.head_length, handle_length=hook.handle_length, handle_y=hook.handle_y, radius=hook.radius, ) if ( handlegrasp or upperhandlegrasp or np.random.random() < hook.handle_length / (hook.handle_length + hook.head_length) ): # Handle. 
min_xyz, max_xyz = np.array(obj.bbox) if upperhandlegrasp: min_xyz[0] = 0.0 min_xyz[1] = pos_handle[1] - MAX_GRASP_Y_OFFSET min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] - hook.radius - 0.5 * FINGER_WIDTH if handlegrasp: max_xyz[0] = 0.0 max_xyz[1] = pos_handle[1] + MAX_GRASP_Y_OFFSET theta = 0.0 else: # Head. min_xyz, max_xyz = np.array(obj.bbox) min_xyz[0] = pos_head[0] - MAX_GRASP_Y_OFFSET if hook.handle_y < 0: min_xyz[1] = pos_handle[1] + hook.radius + 0.5 * FINGER_WIDTH min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] + MAX_GRASP_Y_OFFSET if hook.handle_y > 0: max_xyz[1] = pos_handle[1] - hook.radius - 0.5 * FINGER_WIDTH theta = np.pi / 2 else: # Fit object between gripper fingers. theta = np.random.choice([0.0, np.pi / 2]) min_xyz, max_xyz = np.array(obj.bbox) if theta == 0.0: y_center = 0.5 * (min_xyz[1] + max_xyz[1]) min_xyz[1] = max( min_xyz[1] + 0.5 * FINGER_DISTANCE, y_center - MAX_GRASP_Y_OFFSET ) max_xyz[1] = min( max_xyz[1] - 0.5 * FINGER_DISTANCE, y_center + MAX_GRASP_Y_OFFSET ) elif theta == np.pi / 2: x_center = 0.5 * (min_xyz[0] + max_xyz[0]) min_xyz[0] = max( min_xyz[0] + 0.5 * FINGER_DISTANCE, x_center - MAX_GRASP_Y_OFFSET ) max_xyz[0] = min( max_xyz[0] - 0.5 * FINGER_DISTANCE, x_center + MAX_GRASP_Y_OFFSET ) min_xyz[2] += FINGER_COLLISION_MARGIN min_xyz[2] = max(min_xyz[2], max_xyz[0] - FINGER_HEIGHT) xyz = np.random.uniform(min_xyz, max_xyz) theta += np.random.normal(scale=THETA_STDDEV)
dbprint = lambda *args: None # noqa # dbprint = print @dataclasses.dataclass class Predicate: args: List[str] @classmethod def create(cls, proposition: str) -> "Predicate": predicate, args = symbolic.parse_proposition(proposition) predicate_classes = { name.lower(): predicate_class for name, predicate_class in globals().items() } predicate_class = predicate_classes[predicate] return predicate_class(args) def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Generates a geometric grounding of a predicate.""" return True def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Evaluates to True if the geometrically grounded predicate is satisfied.""" return True def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]: return [objects[arg] for arg in self.args] def __str__(self) -> str: return f"{type(self).__name__.lower()}({', '.join(self.args)})" def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other) -> bool: return str(self) == str(other) class HandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the tail end on a hook object.""" pass class UpperHandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the head on a hook object.""" pass class Free(Predicate): """Unary predicate enforcing that no top-down occlusions exist on the object.""" DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = { (Box, Box): 0.05, (Box, Hook): 0.05, (Box, Rack): 0.1, (Hook, Rack): 0.1, } def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: child_obj = self.get_arg_objects(objects)[0] if child_obj.isinstance(Null): return True for obj in objects.values(): if f"inhand({obj})" in state or obj.isinstance(Null) or obj == child_obj: continue if utils.is_under(child_obj, obj): dbprint(f"{self}.value():", False, f"{child_obj} under {obj}") return False obj_a, obj_b = sorted( 
(child_obj.type(), obj.type()), key=lambda x: x.__name__ ) try: min_distance = Free.DISTANCE_MIN[(obj_a, obj_b)] except KeyError: continue if ( (obj.isinstance(Rack) and f"beyondworkspace({obj})" in state) or f"infront({child_obj}, rack)" in state or f"infront({obj}, rack)" in state ): min_distance = 0.04 if utils.is_within_distance( child_obj, obj, min_distance, obj.physics_id ) and not utils.is_above(child_obj, obj): dbprint( f"{self}.value():", False, f"{child_obj} and {obj} are within min distance", ) return False return True class Tippable(Predicate): """Unary predicate admitting non-upright configurations of an object.""" pass class TableBounds: """Predicate that specifies minimum and maximum x-y bounds on the table.""" MARGIN_SCALE: Dict[Type[Object], float] = {Hook: 0.25} def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds on the table as well as the modified margins.""" assert parent_obj.name == "table" zone = type(self).__name__.lower() poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: pos_bounds = poslimit.bounds(child_obj) zone = random.choice(list(pos_bounds.keys())) # Compute poslimit zone-specific angle if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) return pos_bounds[zone], margin elif f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin @staticmethod def get_poslimit( obj: Object, state: Sequence[Predicate], ) -> 
Optional["PosLimit"]: try: idx_prop = state.index(f"poslimit({obj})") except ValueError: return None prop = state[idx_prop] assert isinstance(prop, PosLimit) return prop @classmethod def get_zone( cls, obj: Object, state: Sequence[Predicate], ) -> Optional["TableBounds"]: zones = [ prop for prop in state if isinstance(prop, TableBounds) and prop.args[0] == obj ] if not zones and f"on({obj}, table)" in state: return cls() elif len(zones) == 1: return zones[0] elif len(zones) != 1: raise ValueError(f"{obj} cannot be in multiple zones: {zones}") return None @staticmethod def scale_margin(obj: Object, margins: np.ndarray) -> np.ndarray: try: bounds = TableBounds.MARGIN_SCALE[obj.type()] except KeyError: return margins return bounds * margins class Aligned(Predicate): """Unary predicate enforcing that the object and world coordinate frames align.""" ANGLE_EPS: float = 0.002 ANGLE_STD: float = 0.05 ANGLE_ABS: float = 0.1 ZONE_ANGLES: Dict[Tuple[Type[Object], Optional[str]], float] = { (Rack, "inworkspace"): 0.5 * np.pi, (Rack, "beyondworkspace"): 0.0, } # def value( # self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] # ) -> bool: # obj = self.get_arg_objects(objects)[0] # if obj.isinstance(Null): # return True # try: # zone = TableBounds.get_zone(obj=obj, state=state) # angle_mean = Aligned.ZONE_ANGLES[(obj.type(), type(zone).__name__.lower())] # if ( # angle_mean - Aligned.ANGLE_ABS < -np.pi # or angle_mean + Aligned.ANGLE_ABS > np.pi # ): # raise ValueError("Cannot recover wrapped angle.") # except KeyError: # angle_mean = 0.0 # angle = eigen.AngleAxisd(eigen.Quaterniond(obj.pose().quat)).angle - angle_mean # if not ( # Aligned.ANGLE_EPS <= abs(angle) <= Aligned.ANGLE_ABS # and utils.is_upright(obj) # ): # dbprint(f"{self}.value():", False) # return False # return True @staticmethod def sample_angle(obj: Object, zone: Optional[str] = None) -> float: angle = 0.0 while abs(angle) < Aligned.ANGLE_EPS: angle = np.random.randn() * Aligned.ANGLE_STD 
try: angle_mu = Aligned.ZONE_ANGLES[(obj.type(), zone)] except KeyError: angle_mu = 0.0 angle = np.clip( angle + angle_mu, angle_mu - Aligned.ANGLE_ABS, angle_mu + Aligned.ANGLE_ABS, ) angle = (angle + np.pi) % (2 * np.pi) - np.pi return angle class PosLimit(Predicate): """Unary predicate limiting the placement positions of particular object types.""" POS_EPS: Dict[Type[Object], float] = {Rack: 0.01} POS_SPEC: Dict[Type[Object], Dict[str, np.ndarray]] = { Rack: { "inworkspace": np.array([0.44, -0.33]), "beyondworkspace": np.array([0.82, 0.00]), } } def bounds(self, child_obj: Object) -> Dict[str, np.ndarray]: assert child_obj.name == self.args[0] if child_obj.type() not in PosLimit.POS_SPEC: raise ValueError(f"Positions not specified for {child_obj.type()}") eps = PosLimit.POS_EPS[child_obj.type()] xys = PosLimit.POS_SPEC[child_obj.type()] bounds = {k: np.array([xy - eps, xy + eps]) for k, xy in xys.items()} return bounds class InWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance((Null, Rack)): # Rack is in workspace by construction. 
return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not utils.is_inworkspace(obj_pos=obj_pos, distance=distance): dbprint( f"{self}.value():", False, "- pos:", obj_pos[:2], "distance:", distance ) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds inside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class InCollisionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the collision zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["workspace_x_min"] <= obj.pose().pos[0] < utils.TABLE_CONSTRAINTS["operational_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == 
"table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_min += margin xy_max -= margin return bounds, margin class InOperationalZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the operational zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["operational_x_min"] <= obj_pos[0] < utils.TABLE_CONSTRAINTS["operational_x_max"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_max"] xy_min += margin xy_max -= margin return bounds, margin class InObstructionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the obstruction zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( obj_pos[0] >= utils.TABLE_CONSTRAINTS["obstruction_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", 
distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["obstruction_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class BeyondWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True distance = float(np.linalg.norm(obj.pose().pos[:2])) if not utils.is_beyondworkspace(obj=obj, distance=distance): return False dbprint(f"{self}.value():", False, "- distance:", distance) return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds r = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min[0] = r * np.cos(np.arcsin(0.5 * (xy_max[1] - xy_min[1]) / r)) xy_min += margin xy_max -= margin return bounds, margin class InOodZone(Predicate, TableBounds): """Unary predicate ensuring 
than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = bounds[0, 0] xy_max[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin class Inhand(Predicate): MAX_GRASP_ATTEMPTS = 1 def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, ) obj_pose = math.Pose.from_eigen(grasp_pose.to_eigen().inverse()) obj_pose.pos += robot.home_pose.pos # Use fake grasp. obj.disable_collisions() obj.set_pose(obj_pose) robot.grasp_object(obj, realistic=False) obj.enable_collisions() # Make sure object isn't touching gripper. 
obj.unfreeze() p.stepSimulation(physicsClientId=robot.physics_id) if not utils.is_touching(obj, robot): break elif i + 1 == Inhand.MAX_GRASP_ATTEMPTS: dbprint(f"{self}.sample():", False, "- exceeded max grasp attempts") return False dbprint(f"{self}.sample():", True) return True @staticmethod def generate_grasp_pose( obj: Object, handlegrasp: bool = False, upperhandlegrasp: bool = False ) -> math.Pose: """Generates a grasp pose in the object frame of reference.""" # Maximum deviation of the object from the gripper's center y. MAX_GRASP_Y_OFFSET = 0.01 # Gap required between control point and object bottom. FINGER_COLLISION_MARGIN = 0.02 FINGER_WIDTH = 0.022 FINGER_HEIGHT = 0.04 FINGER_DISTANCE = 0.08 THETA_STDDEV = 0.05 if obj.isinstance(Hook): hook: Hook = obj # type: ignore pos_handle, pos_head, pos_joint = Hook.compute_link_positions( head_length=hook.head_length, handle_length=hook.handle_length, handle_y=hook.handle_y, radius=hook.radius, ) if ( handlegrasp or upperhandlegrasp or np.random.random() < hook.handle_length / (hook.handle_length + hook.head_length) ): # Handle. min_xyz, max_xyz = np.array(obj.bbox) if upperhandlegrasp: min_xyz[0] = 0.0 min_xyz[1] = pos_handle[1] - MAX_GRASP_Y_OFFSET min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] - hook.radius - 0.5 * FINGER_WIDTH if handlegrasp: max_xyz[0] = 0.0 max_xyz[1] = pos_handle[1] + MAX_GRASP_Y_OFFSET theta = 0.0 else: # Head. min_xyz, max_xyz = np.array(obj.bbox) min_xyz[0] = pos_head[0] - MAX_GRASP_Y_OFFSET if hook.handle_y < 0: min_xyz[1] = pos_handle[1] + hook.radius + 0.5 * FINGER_WIDTH min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] + MAX_GRASP_Y_OFFSET if hook.handle_y > 0: max_xyz[1] = pos_handle[1] - hook.radius - 0.5 * FINGER_WIDTH theta = np.pi / 2 else: # Fit object between gripper fingers. 
theta = np.random.choice([0.0, np.pi / 2]) min_xyz, max_xyz = np.array(obj.bbox) if theta == 0.0: y_center = 0.5 * (min_xyz[1] + max_xyz[1]) min_xyz[1] = max( min_xyz[1] + 0.5 * FINGER_DISTANCE, y_center - MAX_GRASP_Y_OFFSET ) max_xyz[1] = min( max_xyz[1] - 0.5 * FINGER_DISTANCE, y_center + MAX_GRASP_Y_OFFSET ) elif theta == np.pi / 2: x_center = 0.5 * (min_xyz[0] + max_xyz[0]) min_xyz[0] = max( min_xyz[0] + 0.5 * FINGER_DISTANCE, x_center - MAX_GRASP_Y_OFFSET ) max_xyz[0] = min( max_xyz[0] - 0.5 * FINGER_DISTANCE, x_center + MAX_GRASP_Y_OFFSET ) min_xyz[2] += FINGER_COLLISION_MARGIN min_xyz[2] = max(min_xyz[2], max_xyz[0] - FINGER_HEIGHT) xyz = np.random.uniform(min_xyz, max_xyz) theta += np.random.normal(scale=THETA_STDDEV)
theta = np.clip(theta, *primitive_actions.PickAction.RANGES["theta"])
0
2023-10-16 00:22:40+00:00
16k
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n ...
import importlib from codecs import IncrementalDecoder from collections import Counter from functools import lru_cache from typing import Counter as TypeCounter, Dict, List, Optional, Tuple from .constant import ( FREQUENCIES, KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES, ) from .md import is_suspiciously_successive_range from .models import CoherenceMatches from .utils import ( is_accentuated, is_latin, is_multi_byte_encoding, is_unicode_range_secondary, unicode_range, )
11,342
if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] 
characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. 
""" if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. """ languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. 
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / 
len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
is_suspiciously_successive_range(discovered_range, character_range)
5
2023-10-23 18:09:28+00:00
16k
zju3dv/nr_in_a_room
data_gen/batch_real_scene_neural_render.py
[ { "identifier": "read_json", "path": "utils/util.py", "snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)" }, { "identifier": "read_yaml", "path": "utils/util.py", "snippet": "def re...
import sys import os import torch import numpy as np import imageio import time import cv2 from tqdm import tqdm from argparse import ArgumentParser from utils.util import read_json, read_yaml from optim.room_optimizer import RoomOptimizer from optim.misc_utils import read_real_scene_localization, read_testing_config from scipy.spatial.transform import Rotation
14,139
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0]
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0]
for obj_info in read_json(scene_info_json_path)["objs"]:
0
2023-10-15 08:41:29+00:00
16k
WenzhengZhang/Seq2seqCoref
trainer.py
[ { "identifier": "CorefAllMetrics", "path": "metrics.py", "snippet": "class CorefAllMetrics(object):\n \"\"\"\n Wrapper for coreference resolution metrics.\n \"\"\"\n\n @staticmethod\n def _get_mention_to_x(clusters: List[list]) -> dict:\n mention_to_x = {}\n for cluster in c...
import time import torch.distributed as dist import sys import numpy as np import os import json import re import torch.nn as nn import torch import shutil import math import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl import smdistributed.modelparallel.torch as smp import safetensors.torch from tqdm.auto import tqdm from transformers.trainer_utils import HPSearchBackend, speed_metrics, \ TrainOutput from pathlib import Path from torch.utils.data import RandomSampler from torch.utils.data.distributed import DistributedSampler from transformers.trainer_callback import TrainerState from transformers.trainer import TRAINER_STATE_NAME, OptimizerNames from transformers.utils import is_apex_available from transformers.integrations import hp_params from transformers import Seq2SeqTrainer from packaging import version from collections import defaultdict from metrics import CorefAllMetrics from typing import Dict, Union, Any, Optional, Tuple, List from transformers.debug_utils import DebugOption, DebugUnderflowOverflow from transformers.pytorch_utils import is_torch_less_than_1_11 from torch.utils.data import DataLoader from transformers.trainer_utils import EvalLoopOutput, has_length, \ denumpify_detensorize, ShardedDDPOption from data import get_document_predicts, parse_int_output_tokens, \ parse_short_target_tokens, parse_nonint_output_tokens from constants import SPECIAL_IDS, MARK_SPECIAL_IDS, NON_INT_SPECIAL_IDS, \ MENTION_END_NON_INT_SPECIAL_IDS from transformers.deepspeed import deepspeed_init from transformers.trainer_pt_utils import find_batch_size, nested_concat, \ nested_numpify, IterableDatasetShard, nested_truncate, get_parameter_names from transformers.modeling_utils import PreTrainedModel, unwrap_model, \ load_sharded_checkpoint from transformers.utils import logging, is_torch_tpu_available, \ is_sagemaker_mp_enabled, is_safetensors_available, SAFE_WEIGHTS_NAME, \ WEIGHTS_NAME, 
WEIGHTS_INDEX_NAME from transformers.integrations import is_fairscale_available from transformers.dependency_versions_check import dep_version_check from smdistributed.modelparallel import __version__ as SMP_VERSION from apex import amp from transformers import LogitsProcessorList from logits_processor import ShortSeqProcessor, IntProcessor, NonIntProcessor from transformers.trainer_seq2seq import is_deepspeed_zero3_enabled
11,163
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of # samplers has been rounded to a multiple of batch_size, so we truncate. if all_losses is not None: all_losses = all_losses[:num_samples] if all_preds is not None: all_preds = nested_truncate(all_preds, num_samples) if all_labels is not None: all_labels = nested_truncate(all_labels, num_samples) if all_inputs is not None: all_inputs = nested_truncate(all_inputs, num_samples) # Metrics! doc_labels = eval_dataset.doc_labels eval_samples = eval_dataset.samples split = eval_dataset.split if self.args.joint_train: doc_id_to_name = eval_dataset.id_to_name else: doc_id_to_name = None # allow_singletons = eval_dataset.data_args.allow_singletons assert all_preds is not None metrics = self.my_compute_metrics(doc_labels, all_preds, eval_samples, split, doc_id_to_name) # if all_preds is not None and doc_labels is not None: # metrics = self.get_eval_metrics(doc_labels, all_preds, # eval_samples, split) # else: # metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if all_losses is not None: metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) if self.args.gradient_checkpointing: self.model.config.use_cache = False return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. 
Args: model (`nn.Module`): The model to evaluate. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. ignore_keys: list of ignore keys Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step( model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys ) has_labels = "labels" in inputs inputs = self._prepare_inputs(inputs) # XXX: adapt synced_gpus for fairscale as well gen_kwargs = self._gen_kwargs.copy() gen_kwargs["max_length"] = ( gen_kwargs["max_length"] if gen_kwargs.get( "max_length") is not None else self.model.config.max_length ) gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get( "num_beams") is not None else self.model.config.num_beams ) default_synced_gpus = True if is_deepspeed_zero3_enabled() else False gen_kwargs["synced_gpus"] = ( gen_kwargs["synced_gpus"] if gen_kwargs.get( "synced_gpus") is not None else default_synced_gpus ) if "attention_mask" in inputs: gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) if "global_attention_mask" in inputs: gen_kwargs["global_attention_mask"] = inputs.get( "global_attention_mask", None) # prepare generation inputs # some encoder-decoder models can have varying encoder's and thus # varying model input names if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name: generation_inputs = inputs[self.model.encoder.main_input_name] else: generation_inputs = inputs[self.model.main_input_name] # add our logits_processor here if self.args.seq2seq_type != 
'short_seq': if self.args.action_type == 'non_integer': special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS gen_kwargs['logits_processor'] = LogitsProcessorList(
if is_torch_tpu_available(check_device=False): if is_fairscale_available(): dep_version_check("fairscale") if is_sagemaker_mp_enabled(): IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse( "1.10") else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): if is_apex_available(): logger = logging.get_logger(__name__) TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" class CorefTrainer(Seq2SeqTrainer): def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if self.args.val_after_train and self.args.eval_delay < \ self.state.global_step: for checkpoint in checkpoints_sorted[:-1]: states_dir = [str(x) for x in Path( checkpoint).glob(f'global_step*') if os.path.isdir(x)] for state_dir in states_dir: logger.info(f"Deleting optimizer states of saved " f"checkpoint {checkpoint}") if os.path.exists(state_dir) and os.path.isdir( state_dir): shutil.rmtree(state_dir) else: if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[ -1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len( checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[ :number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel) and not hasattr( self.model, 'save_pretrained'): if state_dict is None: state_dict = self.model.state_dict() if isinstance(unwrap_model(self.model), PreTrainedModel): unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") # if self.args.save_safetensors: # safetensors.torch.save_file(state_dict, # os.path.join(output_dir, # SAFE_WEIGHTS_NAME)) # else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def _inner_training_loop( self, 
batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self._train_batch_size = batch_size # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = args.max_steps * total_train_batch_size else: max_steps = math.ceil( args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples( train_dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size max_steps = args.max_steps # Setting a very large number of epochs so we go as many times as necessary over the iterator. 
num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: # nn.DataParallel(model) replicates the model, creating new variables and module # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torch.distributed.launch)." ) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa delay_optimizer_creation = ( self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE or is_sagemaker_mp_enabled() or self.fsdp is not None ) if args.deepspeed: deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine self.optimizer = optimizer self.lr_scheduler = lr_scheduler elif not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Activate gradient checkpointing if needed if args.gradient_checkpointing: self.model.gradient_checkpointing_enable() model = self._wrap_model(self.model_wrapped) if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None: self._load_from_checkpoint(resume_from_checkpoint, model) # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model if delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) # Check if saved optimizer or 
scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Num Epochs = {num_train_epochs}") logger.info( f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps}") logger.info( f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}" ) self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % ( num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info( " Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info( f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " "batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` " "flag to your launch command, but you will resume the training on data already seen by your model." ) if self.is_local_process_zero() and not args.disable_tqdm: steps_trained_progress_bar = tqdm( total=steps_trained_in_current_epoch) steps_trained_progress_bar.set_description( "Skipping the first batches") # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() # tr_loss is a tensor to avoid synchronization of TPUs through .item() tr_loss = torch.tensor(0.0).to(args.device) # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. 
if not args.ignore_data_skip: for epoch in range(epochs_trained): is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( train_dataloader.sampler, RandomSampler ) if is_torch_less_than_1_11 or not is_random_sampler: # We just need to begin an iteration to create the randomization of the sampler. # That was before PyTorch 1.11 however... if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) for _ in train_dataloader: break else: # Otherwise we need to call the whooooole sampler cause there is some random operation added # AT THE VERY END! _ = list(train_dataloader.sampler) if args.manual_empty_cache: torch.cuda.empty_cache() for epoch in range(epochs_trained, num_train_epochs): if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) if isinstance(train_dataloader, DataLoader) and isinstance( train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) elif hasattr(train_dataloader, "dataset") and isinstance( train_dataloader.dataset, IterableDatasetShard): train_dataloader.dataset.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [ args.device]).per_device_loader(args.device) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) step = -1 if args.manual_empty_cache: torch.cuda.empty_cache() for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if args.manual_empty_cache: torch.cuda.empty_cache() if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # if args.manual_empty_cache: # torch.cuda.empty_cache() if ( ((step + 1) % args.gradient_accumulation_steps != 0) and args.local_rank != -1 and args._no_sync_in_gradient_accumulation ): # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. 
with model.no_sync(): tr_loss_step = self.training_step(model, inputs) else: tr_loss_step = self.training_step(model, inputs) if ( args.logging_nan_inf_filter and not is_torch_tpu_available() and ( torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss += tr_loss / ( 1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps if self.deepspeed: if args.manual_empty_cache: torch.cuda.empty_cache() self.deepspeed.step() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: # deepspeed does its own clipping if self.do_grad_scaling: # Reduce gradients first for XLA if is_torch_tpu_available(): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) # AMP: gradients need unscaling self.scaler.unscale_(self.optimizer) if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) elif hasattr(self.optimizer, "clip_grad_norm"): # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping self.optimizer.clip_grad_norm(args.max_grad_norm) elif hasattr(model, "clip_grad_norm_"): # Some models (like FullyShardedDDP) have a specific way to do gradient clipping model.clip_grad_norm_(args.max_grad_norm) else: # Revert to normal clipping otherwise, handling Apex or full precision nn.utils.clip_grad_norm_( amp.master_params( self.optimizer) if self.use_apex else model.parameters(), args.max_grad_norm, ) # 
Optimizer step optimizer_was_run = True if self.deepspeed: pass # called outside the loop elif is_torch_tpu_available(): if self.do_grad_scaling: self.scaler.step(self.optimizer) self.scaler.update() else: xm.optimizer_step(self.optimizer) elif self.do_grad_scaling: scale_before = self.scaler.get_scale() self.scaler.step(self.optimizer) self.scaler.update() scale_after = self.scaler.get_scale() optimizer_was_run = scale_before <= scale_after else: self.optimizer.step() if optimizer_was_run and not self.deepspeed: self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch if args.manual_empty_cache: torch.cuda.empty_cache() self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning( "There seems to be not a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." 
) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info( "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sur the model has been saved by process 0. if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.local_rank != -1: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. 
if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( 
pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS pred_data, pred_token_mentions, predict_ids = \ parse_nonint_output_tokens( input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.add_mention_end, thred) pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in pred_token_mentions] pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = {'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'predict_clusters': pred_data, 'gold_clusters': gold_data, 'predict_token_mentions': pred_token_mentions } # list of (m1,m2) documents_to_chunk_data[doc_id].extend(pred_data) documents_to_chunk_gold[doc_id].extend(gold_data) out_sents.append(out_predict) if doc_id != last_doc_id: predictions[last_doc_id] = get_document_predicts( documents_to_chunk_data[ last_doc_id]) golds[last_doc_id] = get_document_predicts( documents_to_chunk_gold[ last_doc_id]) last_doc_id = doc_id # final one predictions[last_doc_id] = get_document_predicts( documents_to_chunk_data[last_doc_id] ) golds[last_doc_id] = get_document_predicts( documents_to_chunk_gold[last_doc_id] ) # print(predictions) if self.args.joint_train: predictions_list = defaultdict(list) labels_list = defaultdict(list) golds_list = defaultdict(list) else: predictions_list = [] labels_list = [] golds_list = [] for document_id, doc_label in doc_labels.items(): if self.args.joint_train: predictions_list[id_to_name[document_id]].append( 
predictions[document_id]) labels_list[id_to_name[document_id]].append(doc_label) golds_list[id_to_name[document_id]].append(golds[document_id]) else: predictions_list.append(predictions[document_id]) labels_list.append(doc_label) golds_list.append(golds[document_id]) if self.args.joint_train: label_results = {} gold_results = {} for dn in predictions_list.keys(): metrics = CorefAllMetrics().get_all_metrics( labels_list[dn], predictions_list[dn]) metrics_golds = CorefAllMetrics().get_all_metrics( golds_list[dn], predictions_list[dn]) single_label_results = { f'{dn}_{metric_name}_{x}': v for metric_name, metric_values in metrics['micro'].items() for x, v in metric_values.items() } single_gold_results = { f'{dn}_gold_{metric_name}_{x}': v for metric_name, metric_values in metrics_golds['micro'].items() for x, v in metric_values.items() } label_results.update(single_label_results) gold_results.update(single_gold_results) else: metrics = CorefAllMetrics().get_all_metrics(labels_list, predictions_list) metrics_golds = CorefAllMetrics().get_all_metrics(golds_list, predictions_list) label_results = { f'{metric_name}_{x}': v for metric_name, metric_values in metrics['micro'].items() for x, v in metric_values.items() } gold_results = { f'gold_{metric_name}_{x}': v for metric_name, metric_values in metrics_golds['micro'].items() for x, v in metric_values.items() } results = {**label_results, **gold_results} if self.args.joint_train: avg_f1s = [results[f"{dname}_average_f1"] for dname in data_names] results["average_f1"] = sum(avg_f1s) / len(avg_f1s) if self.is_world_process_zero() and self.args.save_predicts: os.makedirs(self.args.save_dir, exist_ok=True) save_path = os.path.join(self.args.save_dir, f'{split}-predicts.txt') results_path = os.path.join(self.args.save_dir, f'{split}-results.json') with open(save_path, 'w') as f: for p in out_sents: f.write('%s\n' % json.dumps(p)) with open(results_path, 'w') as f: json.dump(results, f) return results def evaluation_loop( self, 
dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = False, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ args = self.args prediction_loss_only = False # if eval is called w/o train init deepspeed here if args.deepspeed and not self.deepspeed: # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval # from the checkpoint eventually deepspeed_engine, _, _ = deepspeed_init( self, num_training_steps=0, resume_from_checkpoint=None, inference=is_deepspeed_zero3_enabled() ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine if self.args.gradient_checkpointing: self.model.config.use_cache = True model = self._wrap_model(self.model, training=False, dataloader=dataloader) # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called # while ``train`` is running, cast it to the right dtype first and then put on device if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = self.args.eval_batch_size logger.info(f"***** Running {description} *****") if has_length(dataloader): logger.info(f" Num examples = {self.num_examples(dataloader)}") else: logger.info(" Num examples: Unknown") logger.info(f" Batch size = {batch_size}") model.eval() self.callback_handler.eval_dataloader = dataloader # Do this before wrapping. 
eval_dataset = getattr(dataloader, "dataset", None) if is_torch_tpu_available(): dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader( args.device) if args.past_index >= 0: self._past = None # Initialize containers # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps) losses_host = None preds_host = None labels_host = None inputs_host = None # losses/preds/labels on CPU (final containers) all_losses = None all_preds = None all_labels = None all_inputs = None # Will be useful when we have an iterable dataset so don't know its length. observed_num_examples = 0 # Main evaluation loop for step, inputs in enumerate(dataloader): # Update the observed num examples observed_batch_size = find_batch_size(inputs) if observed_batch_size is not None: observed_num_examples += observed_batch_size # For batch samplers, batch_size is not known by the dataloader in advance. if batch_size is None: batch_size = observed_batch_size # Prediction step loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) inputs_decode = self._prepare_input(inputs[ "input_ids"]) if args.include_inputs_for_metrics else None if is_torch_tpu_available(): xm.mark_step() # Update containers on host if loss is not None: losses = self._nested_gather(loss.repeat(batch_size)) losses_host = losses if losses_host is None else torch.cat( (losses_host, losses), dim=0) if labels is not None: labels = self._pad_across_processes(labels) labels = self._nested_gather(labels) labels_host = labels if labels_host is None else nested_concat( labels_host, labels, padding_index=-100) if inputs_decode is not None: inputs_decode = self._pad_across_processes(inputs_decode) inputs_decode = self._nested_gather(inputs_decode) inputs_host = ( inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) ) if logits is not None: logits = self._pad_across_processes(logits) logits = 
self._nested_gather(logits) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) preds_host = logits if preds_host is None else nested_concat( preds_host, logits, padding_index=-100) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. if args.eval_accumulation_steps is not None and ( step + 1) % args.eval_accumulation_steps == 0: if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate( (all_losses, losses), axis=0) if preds_host is not None: logits = nested_numpify(preds_host) all_preds = logits if all_preds is None else nested_concat( all_preds, logits, padding_index=-100) if inputs_host is not None: inputs_decode = nested_numpify(inputs_host) all_inputs = ( inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) ) if labels_host is not None: labels = nested_numpify(labels_host) all_labels = ( labels if all_labels is None else nested_concat( all_labels, labels, padding_index=-100) ) # Set back to None to begin a new accumulation losses_host, preds_host, inputs_host, labels_host = None, None, None, None if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate( (all_losses, losses), axis=0) if preds_host is not None: logits = nested_numpify(preds_host) all_preds = logits if all_preds is None else nested_concat( all_preds, logits, padding_index=-100) if inputs_host is not None: inputs_decode = nested_numpify(inputs_host) all_inputs = ( inputs_decode if all_inputs is None else nested_concat( all_inputs, inputs_decode, 
padding_index=-100) ) if labels_host is not None: labels = nested_numpify(labels_host) all_labels = labels if all_labels is None else nested_concat( all_labels, labels, padding_index=-100) # Number of samples if has_length(eval_dataset): num_samples = len(eval_dataset) # The instance check is weird and does not actually check for the type, but whether the dataset has the right # methods. Therefore we need to make sure it also has the attribute. elif isinstance(eval_dataset, IterableDatasetShard) and getattr( eval_dataset, "num_examples", 0) > 0: num_samples = eval_dataset.num_examples else: if has_length(dataloader): num_samples = self.num_examples(dataloader) else: # both len(dataloader.dataset) and len(dataloader) fail num_samples = observed_num_examples if num_samples == 0 and observed_num_examples > 0: num_samples = observed_num_examples # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of # samplers has been rounded to a multiple of batch_size, so we truncate. if all_losses is not None: all_losses = all_losses[:num_samples] if all_preds is not None: all_preds = nested_truncate(all_preds, num_samples) if all_labels is not None: all_labels = nested_truncate(all_labels, num_samples) if all_inputs is not None: all_inputs = nested_truncate(all_inputs, num_samples) # Metrics! 
doc_labels = eval_dataset.doc_labels eval_samples = eval_dataset.samples split = eval_dataset.split if self.args.joint_train: doc_id_to_name = eval_dataset.id_to_name else: doc_id_to_name = None # allow_singletons = eval_dataset.data_args.allow_singletons assert all_preds is not None metrics = self.my_compute_metrics(doc_labels, all_preds, eval_samples, split, doc_id_to_name) # if all_preds is not None and doc_labels is not None: # metrics = self.get_eval_metrics(doc_labels, all_preds, # eval_samples, split) # else: # metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if all_losses is not None: metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) if self.args.gradient_checkpointing: self.model.config.use_cache = False return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. 
ignore_keys: list of ignore keys Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step( model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys ) has_labels = "labels" in inputs inputs = self._prepare_inputs(inputs) # XXX: adapt synced_gpus for fairscale as well gen_kwargs = self._gen_kwargs.copy() gen_kwargs["max_length"] = ( gen_kwargs["max_length"] if gen_kwargs.get( "max_length") is not None else self.model.config.max_length ) gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get( "num_beams") is not None else self.model.config.num_beams ) default_synced_gpus = True if is_deepspeed_zero3_enabled() else False gen_kwargs["synced_gpus"] = ( gen_kwargs["synced_gpus"] if gen_kwargs.get( "synced_gpus") is not None else default_synced_gpus ) if "attention_mask" in inputs: gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) if "global_attention_mask" in inputs: gen_kwargs["global_attention_mask"] = inputs.get( "global_attention_mask", None) # prepare generation inputs # some encoder-decoder models can have varying encoder's and thus # varying model input names if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name: generation_inputs = inputs[self.model.encoder.main_input_name] else: generation_inputs = inputs[self.model.main_input_name] # add our logits_processor here if self.args.seq2seq_type != 'short_seq': if self.args.action_type == 'non_integer': special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS gen_kwargs['logits_processor'] = LogitsProcessorList(
[NonIntProcessor(generation_inputs, special_ids,
11
2023-10-17 17:39:16+00:00
16k
chenxn2020/GOSE
GOSEfinetune/models/LiLTRobertaLike/modeling_LiLTRobertaLike.py
[ { "identifier": "LiLTRobertaLikeConfig", "path": "GOSEfinetune/models/LiLTRobertaLike/configuration_LiLTRobertaLike.py", "snippet": "class LiLTRobertaLikeConfig(RobertaConfig):\n model_type = \"liltrobertalike\"\n\n def __init__(\n self,\n channel_shrink_ratio=4,\n max_2d_posi...
import math import torch import torch.nn as nn import torch.utils.checkpoint import os from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN, gelu from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from .configuration_LiLTRobertaLike import LiLTRobertaLikeConfig from dataclasses import dataclass from typing import Dict, Optional, Tuple from transformers.file_utils import ModelOutput from ...modules.decoders.RE import RE from ...modules.decoders.gose import GOSE from ...utils import ReOutput
12,119
self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """
# coding=utf-8 logger = logging.get_logger(__name__) class LiLTRobertaLikeTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings, position_ids def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LiLTRobertaLikeLayoutEmbeddings(nn.Module): def __init__(self, config): super(LiLTRobertaLikeLayoutEmbeddings, self).__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.padding_idx = config.pad_token_id self.box_position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size//config.channel_shrink_ratio, padding_idx=self.padding_idx ) self.box_linear_embeddings = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size//config.channel_shrink_ratio) self.LayerNorm = nn.LayerNorm(config.hidden_size//config.channel_shrink_ratio, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, bbox=None, position_ids=None, ): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings 
= torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings) box_position_embeddings = self.box_position_embeddings(position_ids) spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings) spatial_position_embeddings = self.dropout(spatial_position_embeddings) return spatial_position_embeddings class LiLTRobertaLikeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.layout_query = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.layout_key = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.layout_value = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": 
self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.channel_shrink_ratio = config.channel_shrink_ratio def transpose_for_scores(self, x, r=1): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size//r) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio) layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio) layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio) mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size) tmp_layout_attention_scores = layout_attention_scores / math.sqrt(self.attention_head_size//self.channel_shrink_ratio) attention_scores = tmp_attention_scores + tmp_layout_attention_scores layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores if attention_mask is not None: # Apply the attention mask is (precomputed for 
all layers in BertModel forward() function) layout_attention_scores = layout_attention_scores + attention_mask # Normalize the attention scores to probabilities. layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. layout_attention_probs = self.dropout(layout_attention_probs) # Mask heads if we want to if head_mask is not None: layout_attention_probs = layout_attention_probs * head_mask layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer) layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size//self.channel_shrink_ratio,) layout_context_layer = layout_context_layer.view(*new_context_layer_shape) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = ((context_layer, layout_context_layer), attention_probs) if output_attentions else ((context_layer, layout_context_layer),) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class LiLTRobertaLikeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LiLTRobertaLikeSelfAttention(config) self.output = LiLTRobertaLikeSelfOutput(config) self.pruned_heads = set() ori_hidden_size = config.hidden_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio self.layout_output = LiLTRobertaLikeSelfOutput(config) config.hidden_size = ori_hidden_size def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # 
Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, layout_inputs, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0][0], hidden_states) layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs) outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them return outputs class LiLTRobertaLikeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class LiLTRobertaLikeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward 
self.seq_len_dim = 1 self.attention = LiLTRobertaLikeAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LiLTRobertaLikeAttention(config) self.intermediate = LiLTRobertaLikeIntermediate(config) self.output = LiLTRobertaLikeOutput(config) ori_hidden_size = config.hidden_size ori_intermediate_size = config.intermediate_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio self.layout_intermediate = LiLTRobertaLikeIntermediate(config) self.layout_output = LiLTRobertaLikeOutput(config) config.hidden_size = ori_hidden_size config.intermediate_size = ori_intermediate_size def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, layout_inputs, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0][0] layout_attention_output = self_attention_outputs[0][1] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with 
cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layout_layer_output = apply_chunking_to_forward( self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output ) outputs = ((layer_output, layout_layer_output),) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def layout_feed_forward_chunk(self, attention_output): intermediate_output = self.layout_intermediate(attention_output) layer_output = self.layout_output(intermediate_output, attention_output) return layer_output class LiLTRobertaLikeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, 
encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), 
layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """
config_class = LiLTRobertaLikeConfig
0
2023-10-19 14:36:32+00:00
16k
BurgerBurgerBurger/AA
run.py
[ { "identifier": "add_args", "path": "args.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n...
import argparse import os import numpy as np import torch import ujson as json import pandas as pd import pickle from torch.cuda.amp import GradScaler from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from args import add_args from model import DocREModel from utils import set_seed, collate_fn, create_directory from prepro import read_docred from evaluation import to_official, official_evaluate, merge_results from tqdm import tqdm
11,701
topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test": best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.test_file) else: best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.dev_file) else: best_re = best_evi = best_re_ign = [-1, -1, -1] output = { tag + "_rel": [i * 100 for i in best_re], tag + "_rel_ign": [i * 100 for i in best_re_ign], tag + "_evi": [i * 100 for i in best_evi], } scores = {"dev_F1": best_re[-1] * 100, "dev_evi_F1": best_evi[-1] * 100, "dev_F1_ign": best_re_ign[-1] * 100} if args.save_attn: attns_path = os.path.join(args.load_path, f"{os.path.splitext(args.test_file)[0]}.attns") print(f"saving attentions into {attns_path} ...") with open(attns_path, "wb") as f: pickle.dump(attns, f) return scores, output, official_results, results def dump_to_file(offi: list, offi_path: str, scores: list, score_path: str, results: list = [], res_path: str = "", thresh: float = None): ''' dump scores and (top-k) predictions to file. 
''' print(f"saving official predictions into {offi_path} ...") json.dump(offi, open(offi_path, "w")) print(f"saving evaluations into {score_path} ...") headers = ["precision", "recall", "F1"] scores_pd = pd.DataFrame.from_dict(scores, orient="index", columns=headers) print(scores_pd) scores_pd.to_csv(score_path, sep='\t') if len(results) != 0: assert res_path != "" print(f"saving topk results into {res_path} ...") json.dump(results, open(res_path, "w")) if thresh is not None: thresh_path = os.path.join(os.path.dirname(offi_path), "thresh") if not os.path.exists(thresh_path): print(f"saving threshold into {thresh_path} ...") json.dump(thresh, open(thresh_path, "w")) return def main(): parser = argparse.ArgumentParser() parser = add_args(parser) args = parser.parse_args() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.transformer_type = args.transformer_type set_seed(args) read = read_docred config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id model = DocREModel(args, config, model, tokenizer, num_labels=args.num_labels, max_sent_num=args.max_sent_num, evi_thresh=args.evi_thresh) model.to(args.device) print('total parameters:', sum([np.prod(list(p.size())) for p in model.parameters() if p.requires_grad])) if args.load_path != "": # load model from existing checkpoint model_path = os.path.join(args.load_path, "best.ckpt") model.load_state_dict(torch.load(model_path)) if args.do_train: # Training
def load_input(batch, device, tag="dev"): input = {'input_ids': batch[0].to(device), 'attention_mask': batch[1].to(device), 'labels': batch[2].to(device), 'entity_pos': batch[3], 'hts': batch[4], 'sent_pos': batch[5], 'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None, 'teacher_attns': batch[7].to(device) if not batch[7] is None else None, 'graph': batch[8], 'tag': tag } return input def train(args, model, train_features, dev_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) scaler = GradScaler() print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % 
args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if 
"scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test": best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.test_file) else: best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.dev_file) else: best_re = best_evi = best_re_ign = [-1, -1, -1] output = { tag + "_rel": [i * 100 for i in best_re], tag + "_rel_ign": [i * 100 for i in best_re_ign], tag + "_evi": [i * 100 for i in best_evi], } scores = {"dev_F1": best_re[-1] * 100, "dev_evi_F1": best_evi[-1] * 100, "dev_F1_ign": best_re_ign[-1] * 100} if args.save_attn: attns_path = os.path.join(args.load_path, f"{os.path.splitext(args.test_file)[0]}.attns") print(f"saving attentions into {attns_path} ...") with open(attns_path, "wb") as f: pickle.dump(attns, f) return scores, output, official_results, results def dump_to_file(offi: list, offi_path: str, scores: list, score_path: str, results: list = [], res_path: str = "", thresh: float = None): ''' dump scores and (top-k) predictions to file. 
''' print(f"saving official predictions into {offi_path} ...") json.dump(offi, open(offi_path, "w")) print(f"saving evaluations into {score_path} ...") headers = ["precision", "recall", "F1"] scores_pd = pd.DataFrame.from_dict(scores, orient="index", columns=headers) print(scores_pd) scores_pd.to_csv(score_path, sep='\t') if len(results) != 0: assert res_path != "" print(f"saving topk results into {res_path} ...") json.dump(results, open(res_path, "w")) if thresh is not None: thresh_path = os.path.join(os.path.dirname(offi_path), "thresh") if not os.path.exists(thresh_path): print(f"saving threshold into {thresh_path} ...") json.dump(thresh, open(thresh_path, "w")) return def main(): parser = argparse.ArgumentParser() parser = add_args(parser) args = parser.parse_args() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.transformer_type = args.transformer_type set_seed(args) read = read_docred config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id model = DocREModel(args, config, model, tokenizer, num_labels=args.num_labels, max_sent_num=args.max_sent_num, evi_thresh=args.evi_thresh) model.to(args.device) print('total parameters:', sum([np.prod(list(p.size())) for p in model.parameters() if p.requires_grad])) if args.load_path != "": # load model from existing checkpoint model_path = os.path.join(args.load_path, "best.ckpt") model.load_state_dict(torch.load(model_path)) if args.do_train: # Training
create_directory(args.save_path)
4
2023-10-20 05:53:25+00:00
16k
xingchenshanyao/YOLOP-E
lib/core/function.py
[ { "identifier": "ConfusionMatrix", "path": "lib/core/evaluate.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc=1, conf=0.25, iou_thres=0.45):\n nc = 10 # 20230904 nc是类别数\n self.matrix ...
import time import torch import numpy as np import json import random import cv2 import os import math import wandb from lib.core.evaluate import ConfusionMatrix,SegmentationMetric from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target from lib.utils.utils import time_synchronized from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result from threading import Thread from PIL import Image from torchvision import transforms from pathlib import Path from torch.cuda import amp from tqdm import tqdm from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
11,711
ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to train mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), 
target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = 
torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det):
id_dict_SDExpressway = { 0:'Car', 1:'Truck', 2:'Guidance Sign', 3:'Warning Sign', 4:'Pending Sign', 5:'Speed Limit Sign', 6:'Emergency Telephone Sign', 7:'Directional Sign', 8:'Straight Ahead Arrow', 9:'Straight or Right Turn Arrow'} def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup, writer_dict, logger, device, rank=-1): """ train for one epoch Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return total_loss, head_losses - writer_dict: outputs(2,) output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85] output[1] len:1, [2,256,256] output[2] len:1, [2,256,256] target(2,) target[0] [1,n,5] target[1] [2,256,256] target[2] [2,256,256] Returns: None """ batch_time = AverageMeter() # batch_time = <lib.core.function.AverageMeter object at 0x7f0255618970> data_time = AverageMeter() # data_time = <lib.core.function.AverageMeter object at 0x7f025561a4f0> losses = AverageMeter() # losses = <lib.core.function.AverageMeter object at 0x7f02402e7cd0> # switch to train mode model.train() start = time.time() # start = 1688805138.6791408 for i, (input, target, paths, shapes) in enumerate(train_loader): # i=0 # target = [tensor([[0.0000e+00,...335e-01]]), tensor([[[[1., 1., 1...., 0.]]]]), tensor([[[[1., 1., 1...., 0.]]]])] # paths = ('/home/xingchen/Study...3225df.jpg', '/home/xingchen/Study...49926c.jpg', ...) # shapes = (((720, 1280), ((0.5, 0.5), (0.0, 12.0))), ((...), (...)), ...) 
intermediate = time.time() # intermediate = 1688805496.5324085 #print('tims:{}'.format(intermediate-start)) num_iter = i + num_batch * (epoch - 1) # num_iter = 0 # num_batch = 4375 if num_iter < num_warmup: # warm up lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \ (1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine xi = [0, num_warmup] # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 # 偏置lr从0.1下降到lr0,所有其他lr从0.0上升到lr0 x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM]) data_time.update(time.time() - start) if not cfg.DEBUG: input = input.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target with amp.autocast(enabled=device.type != 'cpu'): outputs = model(input) # outputs = [[tensor([[[[[ 8.8806e...ackward0>), tensor([[[[[ 4.6631e...ackward0>), tensor([[[[[ 1.4758e...ackward0>)], tensor([[[[0.5151, 0...ackward0>), tensor([[[[0.4868, 0...ackward0>)] total_loss, head_losses = criterion(outputs, target, shapes,model) # print(head_losses) # compute gradient and do update step optimizer.zero_grad() scaler.scale(total_loss).backward() scaler.step(optimizer) scaler.update() if rank in [-1, 0]: # measure accuracy and record loss losses.update(total_loss.item(), input.size(0)) # _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(), # target.detach().cpu().numpy()) # acc.update(avg_acc, cnt) # measure elapsed time batch_time.update(time.time() - start) end = time.time() if i % cfg.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\t' \ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \ 'Speed {speed:.1f} samples/s\t' \ 'Data {data_time.val:.3f}s 
({data_time.avg:.3f}s)\t' \ 'Loss {loss.val:.5f} ({loss.avg:.5f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) # writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1,nc = 1): """ validata Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return - writer_dict: Return: None """ # setting max_stride = 32 weights = None save_dir = output_dir + os.path.sep + 'visualization' # save_dir = 'runs/BddDataset/_2023-07-09-09-50/visualization' if not os.path.exists(save_dir): os.mkdir(save_dir) # print(save_dir) _, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS) # batch_size = 16 test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS) # test_batch_size = 16 training = False is_coco = False #is coco dataset save_conf=False # save auto-label confidences verbose=False save_hybrid=False log_imgs,wandb = min(16,100), None nc = 10 #20230904 iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for mAP@0.5:0.95 niou = iouv.numel() # niou = 10 try: except ImportError: wandb = None log_imgs = 0 seen = 0 # import pdb;pdb.set_trace() confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix # confusion matrix 混合矩阵 da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix ll_metric = SegmentationMetric(2) #segment confusion matrix # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else 
model.module.names)} # names = {'0':0} names = id_dict_SDExpressway #20230904 colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # colors = [[191, 83, 111]] coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') # s = ' Class Images Targets P R mAP@.5 mAP@.5:.95' p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0. losses = AverageMeter() da_acc_seg = AverageMeter() da_IoU_seg = AverageMeter() da_mIoU_seg = AverageMeter() ll_acc_seg = AverageMeter() ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to train mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? 
t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: 
T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det):
det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round()
4
2023-10-24 02:08:25+00:00
16k
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It...
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
12,261
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig,
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig,
sampler: Sampler,
5
2023-10-24 22:01:35+00:00
16k
KosinskiLab/pyTME
tme/tests/test_structure.py
[ { "identifier": "Structure", "path": "tme/structure.py", "snippet": "class Structure:\n \"\"\"Represents atomic structures in accordance with the Protein Data Bank (PDB)\n format specification.\n\n Attributes\n ----------\n record_type : NDArray\n Type of the record, e.g., ATOM, HE...
from tempfile import mkstemp from os import remove from tme import Structure from tme.matching_utils import euler_to_rotationmatrix, minimum_enclosing_box import pytest import numpy as np
11,743
STRUCTURE_ATTRIBUTES = [ "record_type", "atom_serial_number", "atom_name", "atom_coordinate", "alternate_location_indicator", "residue_name", "chain_identifier", "residue_sequence_number", "code_for_residue_insertion", "occupancy", "temperature_factor", "segment_identifier", "element_symbol", "charge", "details", ] class TestStructure: def setup_method(self):
STRUCTURE_ATTRIBUTES = [ "record_type", "atom_serial_number", "atom_name", "atom_coordinate", "alternate_location_indicator", "residue_name", "chain_identifier", "residue_sequence_number", "code_for_residue_insertion", "occupancy", "temperature_factor", "segment_identifier", "element_symbol", "charge", "details", ] class TestStructure: def setup_method(self):
self.structure = Structure.from_file("./tme/tests/data/Structures/5khe.cif")
0
2023-10-20 13:46:01+00:00
16k
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files...
import os import torch import torch.distributed as dist import logging import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import generator_loss, discriminator_loss, feature_loss, kl_loss from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
11,061
else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with 
autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): #dist.init_process_group( # backend="gloo", # init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl. #) # Use torchrun instead of mp.spawn #rank = dist.get_rank() #n_gpus = dist.get_world_size() rank = 0 n_gpus = 1 hps = utils.get_hparams() torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None # net_g = DDP(net_g, device_ids=[rank], 
find_unused_parameters=True) # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) # if net_dur_disc is not None: # net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: # if not optim_dur_disc.param_groups[0].get("initial_lr"): # optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, 
hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, 
logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length )
y_hat_mel = mel_spectrogram_torch(
10
2023-10-16 10:04:32+00:00
16k
violet-sto/HN-GFN
proxy/proxy.py
[ { "identifier": "Regressor", "path": "proxy/regression.py", "snippet": "class Regressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, dropout_rate=0, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self....
import numpy as np import pandas as pd import os import torch import torch.nn as nn import torch.nn.functional as F import time from proxy.regression import Regressor, DropoutRegressor, EvidentialRegressor, EnsembleRegressor, GPRegressor from mol_mdp_ext import MolMDPExtended from botorch.utils.multi_objective.box_decompositions.non_dominated import FastNondominatedPartitioning from botorch.utils.multi_objective.hypervolume import Hypervolume from botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement from botorch.acquisition.analytic import UpperConfidenceBound, ExpectedImprovement from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization from botorch.utils.transforms import normalize, unnormalize from botorch.acquisition.objective import GenericMCObjective from botorch.acquisition.objective import ScalarizedPosteriorTransform from botorch.utils.multi_objective.pareto import is_non_dominated from botorch.sampling.samplers import SobolQMCNormalSampler from sklearn.model_selection import train_test_split from utils.acq_func import qUpperConfidenceBound, qExpectedImprovement from copy import copy, deepcopy
11,173
print('initialize from %s Done!' % self.args.proxy_init_checkpoint) def get_partitioning(self, dataset): ys = [] for s, r in dataset.iterset(self.args.proxy_mbsize, 'train'): y = r ys.append(y) ys = torch.cat(ys, dim=0) self.mean = torch.mean(ys, dim=0, keepdim=True) self.std = torch.std(ys, dim=0, keepdim=True) self.proxy.mean = self.mean self.proxy.std = self.std return FastNondominatedPartitioning(ref_point=self.ref_point, Y=ys) def update(self, dataset, round_idx, reset=False): print("Training surrogate function...") if reset: self.init_model() self.partitioning = self.get_partitioning(dataset) if self.args.proxy_uncertainty == 'GP': self.proxy.fit(dataset) else: self.proxy.fit(dataset, self.opt, self.mean, self.std, round_idx) def __call__(self, m, weights=None): raise NotImplementedError class NoAF(Proxy): def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=torch.zeros(0, len(self.args.objectives)))) mean = self.proxy.posterior(m).mean return ((weights * mean).sum(), mean.squeeze()) class UCB(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) self.score_clip = torch.tensor([0.6, 0.6, 0.7, 0.7]).unsqueeze(0).to(args.device) self.args = args def upper_confidence_bound(self, mu: np.array, var: np.array, beta: float): return mu + (beta * var).sqrt() def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) posterior = self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance # oracle scale normalize_mean = normalize(mean, Y_bounds) # [0, 1] scale new_mean = normalize_mean.matmul(weights.t()).squeeze() # weighted_sum 
scalarization new_weights = weights / (Y_bounds[1]-Y_bounds[0]) new_variance = (variance * new_weights**2).sum(1) raw_reward = self.upper_confidence_bound(mu=new_mean, var=new_variance, beta=self.beta) return raw_reward, mean.squeeze() class UCB_chebyshev(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) # oracle scale mean = posterior.mean variance = posterior.variance # * chebyshev_scalarization acq_func = qUpperConfidenceBound( model=self.proxy, objective=objective, beta=self.beta, # 0.1 sampler=self.sampler) return (acq_func(m), mean.squeeze()) class EI(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance
# from botorch.acquisition.monte_carlo import qUpperConfidenceBound, qExpectedImprovement def make_proxy_model(args, mdp): repr_type = args.proxy_repr_type nemb = args.proxy_nemb num_conv_steps = args.proxy_num_conv_steps model_version = args.proxy_model_version if args.proxy_uncertainty == "none": model = Regressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout) if args.proxy_uncertainty == "dropout": model = DropoutRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples) elif args.proxy_uncertainty == 'ensemble': model = EnsembleRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples) elif args.proxy_uncertainty == 'evidential': model = EvidentialRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout) elif args.proxy_uncertainty == 'GP': model = GPRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout) model.to(args.device) if args.floatX == 'float64': model = model.double() return model def get_proxy(args, bpath, oracle): if args.acq_fn.lower() == 'none': return NoAF(args, bpath, oracle) elif args.acq_fn.lower() == 'ucb': return UCB(args, bpath, oracle) elif args.acq_fn.lower() == 'ucb_chebyshev': return UCB_chebyshev(args, bpath, oracle) elif args.acq_fn.lower() == 'ei': return EI(args, 
bpath, oracle) class Proxy: def __init__(self, args, bpath, oracle): self.args = args self.ref_point = torch.zeros(len(args.objectives)).to(args.device) self.oracle = oracle self.device = args.device self.mdp = MolMDPExtended(bpath) self.mdp.post_init(args.device, args.proxy_repr_type) if args.floatX == 'float64': self.mdp.floatX = torch.double else: self.mdp.floatX = torch.float self.init_model() def init_model(self): self.proxy = make_proxy_model(self.args, self.mdp) if self.args.proxy_uncertainty == 'ensemble': self.params = sum([list(model.parameters()) for model in self.proxy.proxy], []) self.opt = torch.optim.Adam(self.params, self.args.proxy_learning_rate, weight_decay=self.args.proxy_weight_decay) elif self.args.proxy_uncertainty == 'GP': pass else: self.opt = torch.optim.Adam(self.proxy.parameters(), self.args.proxy_learning_rate, weight_decay=self.args.proxy_weight_decay) def initialize_from_checkpoint(self): checkpoint = torch.load( self.args.proxy_init_checkpoint, map_location=self.device) self.proxy.proxy.load_state_dict(checkpoint) print('initialize from %s Done!' 
% self.args.proxy_init_checkpoint) def get_partitioning(self, dataset): ys = [] for s, r in dataset.iterset(self.args.proxy_mbsize, 'train'): y = r ys.append(y) ys = torch.cat(ys, dim=0) self.mean = torch.mean(ys, dim=0, keepdim=True) self.std = torch.std(ys, dim=0, keepdim=True) self.proxy.mean = self.mean self.proxy.std = self.std return FastNondominatedPartitioning(ref_point=self.ref_point, Y=ys) def update(self, dataset, round_idx, reset=False): print("Training surrogate function...") if reset: self.init_model() self.partitioning = self.get_partitioning(dataset) if self.args.proxy_uncertainty == 'GP': self.proxy.fit(dataset) else: self.proxy.fit(dataset, self.opt, self.mean, self.std, round_idx) def __call__(self, m, weights=None): raise NotImplementedError class NoAF(Proxy): def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=torch.zeros(0, len(self.args.objectives)))) mean = self.proxy.posterior(m).mean return ((weights * mean).sum(), mean.squeeze()) class UCB(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) self.score_clip = torch.tensor([0.6, 0.6, 0.7, 0.7]).unsqueeze(0).to(args.device) self.args = args def upper_confidence_bound(self, mu: np.array, var: np.array, beta: float): return mu + (beta * var).sqrt() def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) posterior = self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance # oracle scale normalize_mean = normalize(mean, Y_bounds) # [0, 1] scale new_mean = normalize_mean.matmul(weights.t()).squeeze() # weighted_sum scalarization new_weights = weights 
/ (Y_bounds[1]-Y_bounds[0]) new_variance = (variance * new_weights**2).sum(1) raw_reward = self.upper_confidence_bound(mu=new_mean, var=new_variance, beta=self.beta) return raw_reward, mean.squeeze() class UCB_chebyshev(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) # oracle scale mean = posterior.mean variance = posterior.variance # * chebyshev_scalarization acq_func = qUpperConfidenceBound( model=self.proxy, objective=objective, beta=self.beta, # 0.1 sampler=self.sampler) return (acq_func(m), mean.squeeze()) class EI(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance
acq_func = qExpectedImprovement(
7
2023-10-24 14:10:35+00:00
16k
SALT-NLP/Efficient_Unlearning
src/models/transformers/parameter-efficient-finetuning/heads/base.py
[ { "identifier": "ImageClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class ImageClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of image classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, retur...
import logging import torch from dataclasses import dataclass from typing import List, Optional, Union from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import ( ImageClassifierOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...utils import ModelOutput from ..composition import AdapterCompositionBlock, BatchSplit, Parallel, parse_heads_from_composition from ..context import AdapterSetup, ForwardContext from ..model_mixin import ModelWithHeadsAdaptersMixin from ..modeling import Activation_Function_Class
12,507
id2label=None, ): super().__init__(head_name) self.config = { "head_type": "question_answering", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): sequence_output = outputs[0] logits = super().forward(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) start_positions = kwargs.pop("start_positions", None) end_positions = kwargs.pop("end_positions", None) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, 
hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = ( start_logits, end_logits, ) + outputs[1:] if total_loss is not None: outputs = (total_loss,) + outputs return outputs def get_label_names(self): return ["start_positions", "end_positions"] class ImageClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", multilabel=False, id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "image_classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "multilabel": multilabel, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: if self.config["num_labels"] == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) elif self.config["multilabel"]: loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict:
logger = logging.getLogger(__name__) @dataclass class MultiHeadOutput(ModelOutput): head_outputs: List[ModelOutput] = None loss: Optional[torch.FloatTensor] = None @property def logits(self): return torch.vstack([outputs["logits"] for outputs in self.head_outputs]) def __getitem__(self, k): # with number indices the head output at that position is accessed # e.g output[1] is equivalent to output.head_outputs[1] if isinstance(k, int): return self.head_outputs[k] # with strings the attribute in the underlying dict can be adressed # e.g output["loss"] is equivalent to output.loss else: return super().__getitem__(k) def __setitem__(self, k, v): if isinstance(k, int): self.head_outputs[k] = v else: return super().__setitem__(k, v) def __iter__(self): # iterates over the head outputs return iter(self.head_outputs) def __len__(self): return len(self.head_outputs) # Let this class inherit from nn.Sequential to provide iterable access as before class PredictionHead(nn.Sequential): def __init__(self, name): super().__init__() self.config = {} self.name = name def build(self, model): model_config = model.config pred_head = [] dropout_prob = self.config.get("dropout_prob", model_config.hidden_dropout_prob) bias = self.config.get("bias", True) for l_id in range(self.config["layers"]): if dropout_prob > 0: pred_head.append(nn.Dropout(dropout_prob)) if l_id < self.config["layers"] - 1: pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size)) if self.config["activation_function"]: pred_head.append(Activation_Function_Class(self.config["activation_function"])) else: if "num_labels" in self.config: pred_head.append(nn.Linear(model_config.hidden_size, self.config["num_labels"], bias=bias)) elif "num_choices" in self.config: # used for multiple_choice head pred_head.append(nn.Linear(model_config.hidden_size, 1, bias=bias)) else: pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size, bias=bias)) if self.config["activation_function"]: 
pred_head.append(Activation_Function_Class(self.config["activation_function"])) for i, module in enumerate(pred_head): self.add_module(str(i), module) self.apply(model._init_weights) self.train(model.training) # make sure training mode is consistent def get_output_embeddings(self): return None # override for heads with output embeddings def get_label_names(self): return ["labels"] class ClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: if self.config["num_labels"] == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return SequenceClassifierOutput( loss=loss, logits=logits, 
hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class MultiLabelClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "multilabel_classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: loss_fct = BCEWithLogitsLoss() if labels.dtype != torch.float32: labels = labels.float() loss = loss_fct(logits, labels) if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class MultipleChoiceHead(PredictionHead): def __init__( self, model, head_name, num_choices=2, layers=2, activation_function="tanh", 
id2label=None, use_pooler=False, ): super().__init__(head_name) self.config = { "head_type": "multiple_choice", "num_choices": num_choices, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=None, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) logits = logits.view(-1, self.config["num_choices"]) loss = None labels = kwargs.pop("labels", None) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits, labels) if return_dict: return MultipleChoiceModelOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class TaggingHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=1, activation_function="tanh", id2label=None, ): super().__init__(head_name) self.config = { "head_type": "tagging", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): logits = super().forward(outputs[0]) loss = None labels = kwargs.pop("labels", None) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.config["num_labels"]) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = 
loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict: return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class QuestionAnsweringHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=1, activation_function="tanh", id2label=None, ): super().__init__(head_name) self.config = { "head_type": "question_answering", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): sequence_output = outputs[0] logits = super().forward(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) start_positions = kwargs.pop("start_positions", None) end_positions = kwargs.pop("end_positions", None) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, 
end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = ( start_logits, end_logits, ) + outputs[1:] if total_loss is not None: outputs = (total_loss,) + outputs return outputs def get_label_names(self): return ["start_positions", "end_positions"] class ImageClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", multilabel=False, id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "image_classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "multilabel": multilabel, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: if self.config["num_labels"] == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) elif self.config["multilabel"]: loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict:
return ImageClassifierOutput(
0
2023-10-18 18:05:54+00:00
16k
nchen909/Pass-Tuning
models_list/bitfit/modeling_auto.py
[ { "identifier": "PLBartForConditionalGeneration", "path": "models_list/bitfit/modeling_plbart.py", "snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.ver...
import warnings from collections import OrderedDict from transformers.utils import logging from transformers.models.albert.modeling_albert import ( AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from .modeling_plbart import ( PLBartForConditionalGeneration, PLBartModel, ) from transformers.models.bart.modeling_bart import ( BartForCausalLM, BartForQuestionAnswering, BartForSequenceClassification, ) from transformers.models.bert.modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder from transformers.models.big_bird.modeling_big_bird import ( BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, ) from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, ) from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, ) from transformers.models.camembert.modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from 
transformers.models.canine.modeling_canine import ( CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, ) from transformers.models.clip.modeling_clip import CLIPModel from transformers.models.convbert.modeling_convbert import ( ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel from transformers.models.deberta.modeling_deberta import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2ForMaskedLM, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, ) from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel from transformers.models.distilbert.modeling_distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder from transformers.models.electra.modeling_electra import ( ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel from transformers.models.flaubert.modeling_flaubert import ( FlaubertForMultipleChoice, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, 
FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel from transformers.models.funnel.modeling_funnel import ( FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel from transformers.models.hubert.modeling_hubert import HubertModel from transformers.models.ibert.modeling_ibert import ( IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) from transformers.models.led.modeling_led import ( LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, ) from transformers.models.longformer.modeling_longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.luke.modeling_luke import LukeModel from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel from transformers.models.mbart.modeling_mbart import ( MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, 
MBartForSequenceClassification, MBartModel, ) from transformers.models.megatron_bert.modeling_megatron_bert import ( MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) from transformers.models.mpnet.modeling_mpnet import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel from transformers.models.pegasus.modeling_pegasus import PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, RagSequenceForGeneration, RagTokenForGeneration, ) from transformers.models.reformer.modeling_reformer import ( ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerModel, ReformerModelWithLMHead, ) from transformers.models.retribert.modeling_retribert import RetriBertModel from transformers.models.roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, 
RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel from transformers.models.squeezebert.modeling_squeezebert import ( SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) from .modeling_t5 import T5ForConditionalGeneration, T5Model from transformers.models.tapas.modeling_tapas import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, ) from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model from transformers.models.xlm.modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import ( XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from transformers.models.xlm_roberta.modeling_xlm_roberta import ( XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from 
transformers.models.xlnet.modeling_xlnet import ( XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update from transformers.models.auto.configuration_auto import ( AlbertConfig, PLBartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, CanineConfig, CLIPConfig, ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DeiTConfig, DetrConfig, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, GPTNeoConfig, HubertConfig, IBertConfig, LayoutLMConfig, LEDConfig, LongformerConfig, LukeConfig, LxmertConfig, M2M100Config, MarianConfig, MBartConfig, MegatronBertConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, ReformerConfig, RetriBertConfig, RobertaConfig, RoFormerConfig, Speech2TextConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, VisualBertConfig, ViTConfig, Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, )
11,353
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, 
LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, 
LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
(PLBartConfig, PLBartForConditionalGeneration),
0
2023-10-20 09:24:44+00:00
16k
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES...
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
13,672
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception:
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception:
raise TransactionNotInBalanceError(
2
2023-10-20 01:07:20+00:00
16k
hitz-zentroa/This-is-not-a-Dataset
run.py
[ { "identifier": "load_model", "path": "load_model.py", "snippet": "def load_model(\n inference: bool,\n model_weights_name_or_path: str,\n quantization: Optional[int] = None,\n use_lora: bool = False,\n lora_weights_name_or_path: Optional[str] = None,\n lora_target_modules: Optional[Li...
from load_model import load_model from dataset import get_dataloader from evaluate import evaluate from config import DataTrainingArguments, ModelArguments from transformers import ( HfArgumentParser, Seq2SeqTrainingArguments, set_seed, get_scheduler, ) from tqdm import tqdm from accelerate import Accelerator, find_executable_batch_size from typing import List from optimizer import get_optimizer from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from transformers.modeling_utils import unwrap_model import torch import os import wandb import gc import json import math import sys import logging
10,903
optimizer = get_optimizer(training_args=training_args, model=model) lr_scheduler = get_scheduler( name=training_args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=int(training_args.warmup_ratio * max_train_steps), num_training_steps=max_train_steps, ) model, optimizer, train_dataloader = accelerator.prepare( model, optimizer, train_dataloader ) if dev_dataloader is not None: dev_dataloader = accelerator.prepare(dev_dataloader) completed_steps = 0 best_epoch_metric: float = -1 validation_dir: str = os.path.join(training_args.output_dir, "val_logs") os.makedirs(validation_dir, exist_ok=True) running_loss = 0 num_batches = 0 first = True progress_bar = tqdm( range(max_train_steps), disable=not accelerator.is_local_main_process, ascii=True, desc="Training", ) for epoch in range(int(training_args.num_train_epochs)): model.train() for step, batch in enumerate(train_dataloader): ### DEBUG ### if first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) decodeable_labels = batch.labels.clone() decodeable_labels[ decodeable_labels == -100 ] = tokenizer.pad_token_id labels = "\n".join( tokenizer.batch_decode( decodeable_labels, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"-- Labels --\n{labels}") print(f"*** End of sample ***\n") first = False loss = compute_loss(model=model, inputs=batch, return_outputs=False) running_loss += loss.item() loss = loss / training_args.gradient_accumulation_steps accelerator.backward(loss) num_batches += 1 if ( step % training_args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1 ): optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) 
completed_steps += 1 if ( accelerator.is_local_main_process and completed_steps > 0 and (completed_steps % 10 == 0) ): wandb.log( { "Train/Loss": loss.item(), "Train/Running Loss": loss.item() / num_batches, "Train/Learning Rate": optimizer.param_groups[0]["lr"], "epoch": epoch, "step": completed_steps, } ) if ( training_args.eval_steps is not None and completed_steps % training_args.eval_steps == 0 and dev_dataloader is not None ): gen_predictions( model=model, tokenizer=tokenizer, true_tokens_ids=true_tokens_ids, false_tokens_ids=false_tokens_ids, dataloader=dev_dataloader, output_path=os.path.join( validation_dir, f"step_{completed_steps}.preds", ), accelerator=accelerator, predict_with_generate=training_args.predict_with_generate, ) if accelerator.is_main_process:
def clean_cache(): """Clean cache to avoid memory leak. This fixes this issue: https://github.com/huggingface/transformers/issues/22801""" print(f"Cleaning GPU memory. Current memory usage: {torch.cuda.memory_allocated()}") torch.cuda.empty_cache() gc.collect() torch.cuda.empty_cache() print(f"GPU memory usage after cleaning: {torch.cuda.memory_allocated()}") def compute_loss(model, inputs, return_outputs=False): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. """ if "labels" in inputs: labels = inputs.pop("labels") else: raise ValueError("You should supply a labels key to compute the loss") if "loss_weight_mask" in inputs: loss_weight_mask = inputs.pop("loss_weight_mask") else: raise ValueError("You should supply a loss_weight_mask key to compute the loss") if unwrap_model(model).config.is_encoder_decoder: outputs = model(labels=labels, **inputs) else: outputs = model(**inputs) logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0] model_name = unwrap_model(model)._get_name() if ( model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values() or model_name == "PeftModelForCausalLM" ): logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_weight_mask = loss_weight_mask[..., 1:].contiguous() logits = logits.view(-1, logits.size(-1)) labels = labels.view(-1) loss_weight_mask = loss_weight_mask.view(-1) loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100) loss = loss_fct(logits, labels) loss = torch.sum(loss * loss_weight_mask) / torch.sum(loss_weight_mask) return (loss, outputs) if return_outputs else loss def gen_predictions( model, tokenizer, true_tokens_ids: List[int], false_tokens_ids: List[int], dataloader, output_path, accelerator, print_first=False, predict_with_generate=False, return_scores=False, ): if predict_with_generate and return_scores: raise ValueError( "return_scores is not supported when 
predict_with_generate is True" ) model.eval() with torch.no_grad(): samples_seen: int = 0 yes_id = true_tokens_ids[0] no_id = false_tokens_ids[0] all_preds = [] all_scores = [] first = True for step, batch in enumerate( tqdm(dataloader, f"Inference on {os.path.basename(output_path)}") ): if print_first and accelerator.is_local_main_process: ### DEBUG ### if print_first and first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"*** End of sample ***\n") first = False if not predict_with_generate: if not model.config.is_encoder_decoder: logits = model( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ).logits else: encoder_output = model.get_encoder()( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ) decoder_args = { "attention_mask": batch["attention_mask"], "use_cache": False, "encoder_outputs": encoder_output, } gen_inputs = model.prepare_inputs_for_generation( input_ids=torch.tensor( [[tokenizer.pad_token_id]] * len(batch["input_ids"]) ).to(batch["input_ids"].device), **decoder_args, ) logits = model( **gen_inputs, ).logits logits = logits[:, -1, :] logits = torch.nn.functional.softmax(logits, dim=-1) logits = logits[:, [yes_id, no_id]] logits = logits[:, 0] / (logits[:, 0] + logits[:, 1]) preds = logits > 0.5 preds = accelerator.gather(preds).cpu().tolist() logits = accelerator.gather(logits).cpu().tolist() if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] logits = logits[: (len(dataloader.dataset) - samples_seen)] else: 
samples_seen += len(batch) all_preds.extend(preds) all_scores.extend(logits) else: preds = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=6, ) preds = accelerator.gather( accelerator.pad_across_processes( preds, dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() inputs_ids = accelerator.gather( accelerator.pad_across_processes( batch["input_ids"], dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() preds = preds[:, len(inputs_ids[0]) :] if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # print(preds) for pred in preds: pred = pred.lower() if "true" in pred: all_preds.append(True) else: all_preds.append(False) if accelerator.is_local_main_process: with open(output_path, "w", encoding="utf8") as f: for pred in all_preds if not return_scores else all_scores: print(pred, file=f) if not return_scores: json_dataset = dataloader.dataset.get_jsonl() assert len(json_dataset) == len(all_preds) with open( os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8" ) as f: for json_line, pred in zip(json_dataset, all_preds): json_line["prediction"] = bool(pred) print(json.dumps(json_line, ensure_ascii=False), file=f) model.train() def main( model_args: ModelArguments, data_args: DataTrainingArguments, training_args: Seq2SeqTrainingArguments, ): assert ( training_args.do_train or training_args.do_predict ), "You must specify do_train or do_predict" assert not (training_args.do_train and data_args.do_predict_full_dataset), ( "You cannot do both training and predict_full_dataset, " "as the model will be evaluated on the full dataset, which" " includes the training set." 
) logging.basicConfig(level=logging.INFO) accelerator = Accelerator() print(f"Accelerator State: {accelerator.state}") set_seed(training_args.seed) if training_args.do_train: model, tokenizer = load_model( inference=False, model_weights_name_or_path=model_args.model_name_or_path, lora_weights_name_or_path=model_args.lora_weights_name_or_path, quantization=model_args.quantization, use_lora=model_args.use_lora, lora_target_modules=model_args.lora_target_modules, torch_dtype=model_args.torch_dtype, force_auto_device_map=data_args.force_auto_device_map, use_flash_attention=model_args.use_flash_attention, use_gradient_checkpointing=model_args.use_lora, ) true_tokens_ids = tokenizer.encode("True", add_special_tokens=False) false_tokens_ids = tokenizer.encode("False", add_special_tokens=False) train_dataloader = get_dataloader( tokenizer=tokenizer, split="train", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) dev_dataloader = None if training_args.do_eval: dev_dataloader = get_dataloader( tokenizer=tokenizer, split="validation", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) if 
accelerator.is_main_process: wandb.init( project="ThisIsNotADataset", name=f"{os.path.basename(training_args.output_dir)}", config=vars(training_args), ) num_update_steps_per_epoch = math.ceil( len(train_dataloader) / training_args.gradient_accumulation_steps ) max_train_steps = int( training_args.num_train_epochs * num_update_steps_per_epoch ) optimizer = get_optimizer(training_args=training_args, model=model) lr_scheduler = get_scheduler( name=training_args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=int(training_args.warmup_ratio * max_train_steps), num_training_steps=max_train_steps, ) model, optimizer, train_dataloader = accelerator.prepare( model, optimizer, train_dataloader ) if dev_dataloader is not None: dev_dataloader = accelerator.prepare(dev_dataloader) completed_steps = 0 best_epoch_metric: float = -1 validation_dir: str = os.path.join(training_args.output_dir, "val_logs") os.makedirs(validation_dir, exist_ok=True) running_loss = 0 num_batches = 0 first = True progress_bar = tqdm( range(max_train_steps), disable=not accelerator.is_local_main_process, ascii=True, desc="Training", ) for epoch in range(int(training_args.num_train_epochs)): model.train() for step, batch in enumerate(train_dataloader): ### DEBUG ### if first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) decodeable_labels = batch.labels.clone() decodeable_labels[ decodeable_labels == -100 ] = tokenizer.pad_token_id labels = "\n".join( tokenizer.batch_decode( decodeable_labels, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"-- Labels --\n{labels}") print(f"*** End of sample ***\n") first = False loss = compute_loss(model=model, 
inputs=batch, return_outputs=False) running_loss += loss.item() loss = loss / training_args.gradient_accumulation_steps accelerator.backward(loss) num_batches += 1 if ( step % training_args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1 ): optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if ( accelerator.is_local_main_process and completed_steps > 0 and (completed_steps % 10 == 0) ): wandb.log( { "Train/Loss": loss.item(), "Train/Running Loss": loss.item() / num_batches, "Train/Learning Rate": optimizer.param_groups[0]["lr"], "epoch": epoch, "step": completed_steps, } ) if ( training_args.eval_steps is not None and completed_steps % training_args.eval_steps == 0 and dev_dataloader is not None ): gen_predictions( model=model, tokenizer=tokenizer, true_tokens_ids=true_tokens_ids, false_tokens_ids=false_tokens_ids, dataloader=dev_dataloader, output_path=os.path.join( validation_dir, f"step_{completed_steps}.preds", ), accelerator=accelerator, predict_with_generate=training_args.predict_with_generate, ) if accelerator.is_main_process:
results = evaluate(
2
2023-10-18 10:24:48+00:00
16k
Glasgow-AI4BioMed/GenKIE
tasks/pretrain_tasks/unify_task.py
[ { "identifier": "OFATask", "path": "tasks/ofa_task.py", "snippet": "class OFATask(FairseqTask):\n def __init__(self, cfg: OFAConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg...
from dataclasses import dataclass, field from typing import Optional from fairseq.tasks import register_task from fairseq.data import FairseqDataset, iterators from tasks.ofa_task import OFATask, OFAConfig from data.pretrain_data.unify_dataset import UnifyDataset from data.file_dataset import FileDataset import json import logging import os import math
11,143
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken form OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "randomly shuffle sentences for this proportion of inputs"}, ) replace_length: int = field( 
default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig)
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken form OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "randomly shuffle sentences for this proportion of inputs"}, ) replace_length: int = field( 
default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig)
class UnifyTask(OFATask):
0
2023-10-20 20:01:42+00:00
16k
timapage/pyqt6-yolov8
main.py
[ { "identifier": "CameraCaptureThread", "path": "src/qt/stream/video_capture.py", "snippet": "class CameraCaptureThread(QThread):\n send_video_info = pyqtSignal(dict)\n send_frame = pyqtSignal(list)\n def __init__(self):\n super(CameraCaptureThread, self).__init__()\n self.thread_n...
from src.qt.stream.video_capture import CameraCaptureThread from src.qt.stream.visualize import VideoVisualizationThread from src.qt.stream.ai_worker import AiWorkerThread from src.ui.main_window import Ui_MainWindow from src.qt.video.video_worker import FileProcessThread from PyQt6 import QtGui, QtWidgets from PyQt6.QtCore import Qt import sys import numpy as np
12,220
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self)
self.ai_thread = AiWorkerThread()
2
2023-10-18 09:21:01+00:00
16k
S-LoRA/S-LoRA
slora/server/router/manager.py
[ { "identifier": "SamplingParams", "path": "slora/server/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n ...
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..io_struct import BatchTokenIdOut, AbortReq from .stats import Stats from slora.server.input_params import InputParams from slora.models.peft.lora_adapter import get_lora_config from slora.server.router.profiler import AlphaModel, BetaModel from slora.server.router.abort_req_queue import AbortReqQueue from slora.server.router.cluster_req_queue import ClusterReqQueue from slora.server.router.vtc_req_queue import VTCReqQueue from slora.server.router.pets_req_queue import PETSReqQueue from slora.server.router.peft_req_queue import PEFTReqQueue
13,443
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: return ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: return AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "slora": return ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: raise Exception("unrecognized scheduler") class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 self.req_queue = get_scheduler(input_params, adapter_dirs) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens 
= 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: return ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: return AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "slora": return ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: raise Exception("unrecognized scheduler") class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 self.req_queue = get_scheduler(input_params, adapter_dirs) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens 
= 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size):
rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size)
4
2023-11-05 04:08:36+00:00
16k
fleet-ai/context
cli.py
[ { "identifier": "print_markdown", "path": "utils/utils.py", "snippet": "def print_markdown(message):\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rprint(Rule(style=\"white\"))\n ...
import os import openai import sys import argparse import traceback from getpass import getpass from rich import print as rprint from utils.utils import print_markdown, print_exception, extract_code_blocks, print_help from utils.stream import TextStream from utils.ai import ( retrieve_context, construct_prompt, get_remote_chat_response, get_other_chat_response, ) from constants.cli import ARGUMENTS, LIBRARIES, OPENAI_MODELS from constants.ai import MODELS_TO_TOKENS
12,261
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window
if model in OPENAI_MODELS:
11
2023-11-02 07:07:13+00:00
16k
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=...
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
13,041
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. 
""" def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. 
""" def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [
p if isinstance(p, Provider) else Provider(p)
3
2023-11-05 13:28:57+00:00
16k
TheFunny/ArisuAutoSweeper
module/device/method/minitouch.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n ...
import asyncio import json import re import socket import time import websockets from functools import wraps from typing import List from adbutils.errors import AdbError from uiautomator2 import _Service from module.base.decorator import Config, cached_property, del_cached_property from module.base.timer import Timer from module.base.utils import * from module.device.connection import Connection from module.device.method.utils import RETRY_TRIES, retry_sleep, handle_adb_error from module.exception import RequestHumanTakeover, ScriptError from module.logger import logger
12,634
if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if 
callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e:
def random_normal_distribution(a, b, n=5): output = np.mean(np.random.uniform(a, b, size=n)) return output def random_theta(): theta = np.random.uniform(0, 2 * np.pi) return np.array([np.sin(theta), np.cos(theta)]) def random_rho(dis): return random_normal_distribution(-dis, dis) def insert_swipe(p0, p3, speed=15, min_distance=10): """ Insert way point from start to end. First generate a cubic bézier curve Args: p0: Start point. p3: End point. speed: Average move speed, pixels per 10ms. min_distance: Returns: list[list[int]]: List of points. Examples: > insert_swipe((400, 400), (600, 600), speed=20) [[400, 400], [406, 406], [416, 415], [429, 428], [444, 442], [462, 459], [481, 478], [504, 500], [527, 522], [545, 540], [560, 557], [573, 570], [584, 582], [592, 590], [597, 596], [600, 600]] """ p0 = np.array(p0) p3 = np.array(p3) # Random control points in Bézier curve distance = np.linalg.norm(p3 - p0) p1 = 2 / 3 * p0 + 1 / 3 * p3 + random_theta() * random_rho(distance * 0.1) p2 = 1 / 3 * p0 + 2 / 3 * p3 + random_theta() * random_rho(distance * 0.1) # Random `t` on Bézier curve, sparse in the middle, dense at start and end segments = max(int(distance / speed) + 1, 5) lower = random_normal_distribution(-85, -60) upper = random_normal_distribution(80, 90) theta = np.arange(lower + 0., upper + 0.0001, (upper - lower) / segments) ts = np.sin(theta / 180 * np.pi) ts = np.sign(ts) * abs(ts) ** 0.9 ts = (ts - min(ts)) / (max(ts) - min(ts)) # Generate cubic Bézier curve points = [] prev = (-100, -100) for t in ts: point = p0 * (1 - t) ** 3 + 3 * p1 * t * (1 - t) ** 2 + 3 * p2 * t ** 2 * (1 - t) + p3 * t ** 3 point = point.astype(int).tolist() if np.linalg.norm(np.subtract(point, prev)) < min_distance: continue points.append(point) prev = point # Delete nearing points if len(points[1:]): distance = np.linalg.norm(np.subtract(points[1:], points[0]), axis=1) mask = np.append(True, distance > min_distance) points = np.array(points)[mask].tolist() else: points = [p0, p3] return 
points class Command: def __init__( self, operation: str, contact: int = 0, x: int = 0, y: int = 0, ms: int = 10, pressure: int = 100 ): """ See https://github.com/openstf/minitouch#writable-to-the-socket Args: operation: c, r, d, m, u, w contact: x: y: ms: pressure: """ self.operation = operation self.contact = contact self.x = x self.y = y self.ms = ms self.pressure = pressure def to_minitouch(self) -> str: """ String that write into minitouch socket """ if self.operation == 'c': return f'{self.operation}\n' elif self.operation == 'r': return f'{self.operation}\n' elif self.operation == 'd': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'm': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'u': return f'{self.operation} {self.contact}\n' elif self.operation == 'w': return f'{self.operation} {self.ms}\n' else: return '' def to_atx_agent(self, max_x=1280, max_y=720) -> str: """ Dict that send to atx-agent, $DEVICE_URL/minitouch See https://github.com/openatx/atx-agent#minitouch%E6%93%8D%E4%BD%9C%E6%96%B9%E6%B3%95 """ x, y = self.x / max_x, self.y / max_y if self.operation == 'c': out = dict(operation=self.operation) elif self.operation == 'r': out = dict(operation=self.operation) elif self.operation == 'd': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'm': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'u': out = dict(operation=self.operation, index=self.contact) elif self.operation == 'w': out = dict(operation=self.operation, milliseconds=self.ms) else: out = dict() return json.dumps(out) class CommandBuilder: """Build command str for minitouch. 
You can use this, to custom actions as you wish:: with safe_connection(_DEVICE_ID) as connection: builder = CommandBuilder() builder.down(0, 400, 400, 50) builder.commit() builder.move(0, 500, 500, 50) builder.commit() builder.move(0, 800, 400, 50) builder.commit() builder.up(0) builder.commit() builder.publish(connection) """ DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. 
x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # 
When adb server was killed except ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e:
if handle_adb_error(e):
7
2023-11-01 07:09:45+00:00
16k
BrianPugh/cyclopts
tests/test_help.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)...
import inspect import sys import attrs import pytest from enum import Enum from textwrap import dedent from typing import List, Literal, Optional, Union from typing_extensions import Annotated from typing import Annotated from cyclopts import App, Group, Parameter from cyclopts.help import ( HelpEntry, HelpPanel, create_parameter_help_panel, format_command_entries, format_doc, format_usage, ) from cyclopts.resolve import ResolvedCommand
11,284
@app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd):
if sys.version_info < (3, 9): else: @pytest.fixture def app(): return App( name="app", help="App Help String Line 1.", ) def test_empty_help_panel_rich_silent(console): help_panel = HelpPanel(format="command", title="test") with console.capture() as capture: console.print(help_panel) actual = capture.get() assert actual == "" def test_help_default_action(app, console): """No command should default to help.""" with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage(app, console): app.usage = "My custom usage." with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ My custom usage. App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage_subapp(app, console): app.command(App(name="foo", usage="My custom usage.")) with console.capture() as capture: app(["foo", "--help"], console=console) actual = capture.get() expected = dedent( """\ My custom usage. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. 
│ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_default_help_flags(console): """Standard help flags.""" app = App(name="app", help="App Help String Line 1.") with console.capture() as capture: app(["--help"], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_usage_empty(console): app = App( name="app", help="App Help String Line 1.", help_flags=[], version_flags=[], ) with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app\n\n" def test_help_format_usage_command(app, console): @app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd):
command = ResolvedCommand(cmd, *default_function_groups)
9
2023-11-03 02:24:25+00:00
16k
RoboFlamingo/RoboFlamingo
robot_flamingo/models/factory.py
[ { "identifier": "BCFlamingo", "path": "robot_flamingo/models/flamingo_bc.py", "snippet": "class BCFlamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\...
from logging import debug from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig from typing import Optional from robot_flamingo.models.flamingo_bc import BCFlamingo from robot_flamingo.models.flamingo_mpt import MPTFlamingo from open_flamingo.src.flamingo_lm import FlamingoLMMixin from open_flamingo.src.utils import extend_instance from open_flamingo.src.factory import _infer_decoder_layers_attr_name import open_clip
13,257
def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. 
Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. 
lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name:
mpt_dict = { "mpt_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b", "tokenizer_path": "path_to/mpt-1b-redpajama-200b", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b/checkpoint.pt" }, "mpt_dolly_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b-dolly", "tokenizer_path": "path_to/mpt-1b-redpajama-200b-dolly", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b-langinstruct/checkpoint.pt" }, "mpt_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b-langinstruct/checkpoint.pt" }, "mpt_base_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Base-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Base-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b/checkpoint.pt" }, "mpt_9b": { "lang_encoder_path": "path_to/mpt-7b", "tokenizer_path": "path_to/mpt-7b", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B-vitl-mpt7b/checkpoint.pt" }, "llama_9b": { "lang_encoder_path": "path_to/llama-7b-hf-jxu124", "tokenizer_path": "path_to/llama-7b-hf-jxu124", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B/checkpoint.pt" } } def get_transforms( clip_vision_encoder_path: str = "ViT-L-14", clip_vision_encoder_pretrained: str = "openai", tokenizer_path: str = "path_to/llama-7b-hf-jxu124", use_local_files: bool = False, ): vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) text_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if 
text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) return image_processor, text_tokenizer def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. 
Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. 
lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name:
Model_fn = BCFlamingo
0
2023-11-02 01:36:23+00:00
16k
radekd91/inferno
inferno/datasets/AfewVaDataModule.py
[ { "identifier": "load_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation(filename):\n with open(filename, \"rb\") as f:\n seg = cpkl.load(f, compression='gzip')\n seg_type = seg[0]\n seg_image = seg[1]\n # seg_type = pkl.load(f)\n # s...
import json import os, sys import numpy as np import scipy as sp import torch import pytorch_lightning as pl import pandas as pd import pickle as pkl import imgaug import traceback import json import bisect import warnings import yaml from enum import Enum from pathlib import Path from skimage.io import imread, imsave from skimage.transform import resize, rescale from inferno.datasets.IO import load_segmentation, process_segmentation, load_emotion, save_emotion from inferno.utils.image import numpy_image_to_torch from inferno.transforms.keypoints import KeypointNormalization from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp from inferno.datasets.EmotionalImageDataset import EmotionalImageDatasetBase from inferno.datasets.UnsupervisedImageDataset import UnsupervisedImageDataset from inferno.utils.FaceDetector import save_landmark, load_landmark from tqdm import auto from torch.utils.data.dataloader import DataLoader from inferno.transforms.imgaug import create_image_augmenter from torchvision.transforms import Resize, Compose from sklearn.neighbors import NearestNeighbors from torch.utils.data._utils.collate import default_collate from torch.utils.data.sampler import WeightedRandomSampler from collections import OrderedDict from munch import Munch from inferno.utils.other import class_from_str from omegaconf import OmegaConf, DictConfig from inferno.layers.losses.EmonetLoader import get_emonet
12,800
self.v_sample_weights = v_weights bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, va_binnumber = sp.stats.binned_statistic( va[:, 1], None, 'count', bin_1d) a_weights = 1 / va_binnumber a_weights /= np.linalg.norm(a_weights) a_weights *= np.linalg.norm(np.ones_like(a_weights)) self.a_sample_weights = a_weights def __len__(self): # return 100 return self.size def _load_image(self, index): vid_index = bisect.bisect_right(self.video_sizes_cumulative, index) - 1 vid_first_im_index = self.video_sizes_cumulative[vid_index] vid_item = list(self.sample_list.items())[vid_index] vid_name = vid_item[0] vid_gt = vid_item[1] assert vid_gt.video_id == vid_name vid_frame_list = sorted(list(vid_gt['frames'].keys())) selected_frame = vid_frame_list[index - vid_first_im_index] im_rel_path = Path(vid_name) / (selected_frame + self.ext) im_file = Path(self.image_path) / im_rel_path im_file = im_file.parent / (im_file.stem + self.ext) input_img = imread(im_file) # # scale_factor_x = 1.48 # scale_factor_x = 1.25 # # scale_factor_x = 1 # input_img = resize(input_img, (432, 720, 1)) input_img = resize(input_img, (576, 960)) scale_factor_x = 720 / 960 valence = vid_gt['frames'][selected_frame]['valence'] arousal = vid_gt['frames'][selected_frame]['arousal'] facial_landmarks = np.array(vid_gt['frames'][selected_frame]['landmarks']) facial_landmarks[:,0] /= scale_factor_x if self.normalize_va: valence /= 10. arousal /= 10. return input_img, facial_landmarks, valence, arousal, im_file def _load_additional_data(self, im_rel_path): return {} def _get_sample(self, index): num_fails = 0 max_fails = 50 try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) except Exception as e: # if the image is corrupted or missing (there is a few :-/), find some other one while True: num_fails += 1 if num_fails >= max_fails: # something must have gone serious wrong. 
Nothing loads, so throw an exception raise e index += 1 index = index % len(self) try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) success = True except Exception as e2: success = False if success: break left = facial_landmarks[:,0].min() top = facial_landmarks[:,1].min() right = facial_landmarks[:,0].max() bottom = facial_landmarks[:,1].max() input_img_shape = input_img.shape if not self.use_processed: # Use AffectNet as is provided (their bounding boxes, and landmarks, no segmentation) old_size, center = bbox2point(left, right, top, bottom, type='kpt68') # old_size, center = bbox2point(left, right, top, bottom, type='bbox') size = int(old_size * self.scale) img, landmark = bbpoint_warp(input_img, center, size, self.image_size, landmarks=facial_landmarks) img *= 255. if not self.use_gt_bb: raise NotImplementedError() # landmark_type, landmark = load_landmark( # self.path_prefix / self.landmark_list[index]) landmark = landmark[np.newaxis, ...] seg_image = None else: # use AffectNet processed by me. I used their bounding boxes (to not have to worry about detecting # the correct face in case there's more) and I ran our FAN and segmentation over it img = input_img # the image has already been cropped in preprocessing (make sure the input root path # is specificed to the processed folder and not the original one landmark_path = Path(self.image_path).parent / "landmarks" / im_rel_path landmark_path = landmark_path.parent / (landmark_path.stem + ".pkl") landmark_type, landmark = load_landmark( landmark_path) landmark = landmark[np.newaxis, ...] 
segmentation_path = Path(self.image_path).parent / "segmentations" / im_rel_path segmentation_path = segmentation_path.parent / (segmentation_path.stem + ".pkl") seg_image, seg_type = load_segmentation( segmentation_path) seg_image = seg_image[np.newaxis, :, :, np.newaxis] seg_image = process_segmentation( seg_image, seg_type).astype(np.uint8) if self.load_emotion_feature: emotion_path = Path(self.image_path).parent / "emotions" / im_rel_path emotion_path = emotion_path.parent / (emotion_path.stem + ".pkl")
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emoca@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de """ warnings.filterwarnings('ignore') # def make_class_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_va_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. 
/ class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_balanced_sample_by_weights(weights): # return WeightedRandomSampler(weights, len(weights)) def new_affewva(class_name): dataset_class = class_from_str(class_name, sys.modules[__name__]) return dataset_class class AfewVaDataModule(FaceDataModuleBase): def __init__(self, input_dir, output_dir, processed_subfolder = None, face_detector='fan', face_detector_threshold=0.9, image_size=224, scale=1.25, bb_center_shift_x=0., bb_center_shift_y=0., processed_ext=".png", device=None, augmentation=None, train_batch_size=64, val_batch_size=64, test_batch_size=64, num_workers=0, ring_type=None, ring_size=None, drop_last=False, sampler=None, split_seed=0, train_fraction=0.6, val_fraction=0.2, test_fraction=0.2, k_fold_crossvalidation=None, k_index=None, dataset_type=None, ): super().__init__(input_dir, output_dir, processed_subfolder, face_detector=face_detector, face_detector_threshold=face_detector_threshold, image_size=image_size, bb_center_shift_x=bb_center_shift_x, bb_center_shift_y=bb_center_shift_y, scale=scale, processed_ext=processed_ext, device=device) self.dataset_type = dataset_type or "AfewVa" # # self.subsets = sorted([f.name for f in (Path(input_dir) / "Manually_Annotated" / "Manually_Annotated_Images").glob("*") if f.is_dir()]) # self.input_dir = Path(self.root_dir) / "Manually_Annotated" / "Manually_Annotated_Images" # train = pd.read_csv(self.input_dir.parent / "training.csv") # val = pd.read_csv(self.input_dir.parent / "validation.csv") # self.df = pd.concat([train, val], ignore_index=True, sort=False) self.face_detector_type = 'fan' self.scale = scale self.use_processed = False if not (Path(self.output_dir) / "gt.pkl").exists(): video_list = sorted([p for p in Path(input_dir).glob("*") if p.is_dir()]) video_gts = OrderedDict() for iv, vp in enumerate(auto.tqdm(video_list)): video_gts[vp.stem] = Munch( json.load(open(vp / (vp.stem + ".json"), 
"r"))) with open(Path(self.output_dir) / "gt.pkl", "wb") as f: pkl.dump(video_gts, f) else: with open(Path(self.output_dir) / "gt.pkl", "rb") as f: video_gts = pkl.load(f) if self.use_processed: self.image_path = Path(self.output_dir) / "detections" else: self.image_path = Path(input_dir) self.seed = split_seed np.random.seed(self.seed) indices = np.arange(len(video_gts), dtype=np.int32) + 1 np.random.shuffle(indices) if k_fold_crossvalidation is not None: training_indices = [] validation_indices = [] for k in range(k_fold_crossvalidation): start_i = (k * len(indices)) // k_fold_crossvalidation end_i = ((k + 1) * len(indices)) // k_fold_crossvalidation training_indices += [np.concatenate([indices[0:(start_i)], indices[end_i:]])] validation_indices += [indices[start_i:end_i]] self.train_indices = training_indices[k_index] self.val_indices = validation_indices[k_index] self.test_indices = np.copy(validation_indices[k_index]) else: self.train_fraction = train_fraction self.val_fraction = val_fraction self.test_fraction = test_fraction assert self.train_fraction + self.val_fraction + self.test_fraction == 1.0 train_end = int(len(indices) * self.train_fraction) val_end = int(len(indices) * ( self.train_fraction + self.val_fraction)) self.train_indices = indices[:train_end] self.val_indices = indices[train_end:val_end] self.test_indices = indices[val_end:] # iterate over the training indices and create a list of the corresponding video names self.train_list = OrderedDict() self.val_list = OrderedDict() self.test_list = OrderedDict() for tr_i in self.train_indices: self.train_list[f"{tr_i:03d}"] = video_gts[f"{tr_i:03d}"] for v_i in self.val_indices: self.val_list[f"{v_i:03d}"] = video_gts[f"{v_i:03d}"] for t_i in self.test_indices: self.test_list[f"{t_i:03d}"] = video_gts[f"{t_i:03d}"] # self.ignore_invalid = ignore_invalid self.train_batch_size = train_batch_size self.val_batch_size = val_batch_size self.test_batch_size = test_batch_size self.num_workers = num_workers 
self.augmentation = augmentation self.sampler = sampler or "uniform" if self.sampler not in ["uniform", "balanced_videos", "balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise ValueError(f"Invalid sampler type: '{self.sampler}'") if self.sampler in ["balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise NotImplementedError() if ring_type not in [None, "gt_va", "augment"]: raise ValueError(f"Invalid ring type: '{ring_type}'") if ring_type == "gt_va": raise NotImplementedError() self.ring_type = ring_type self.ring_size = ring_size self.drop_last = drop_last @property def subset_size(self): return 1000 # @property # def num_subsets(self): # num_subsets = len(self.df) // self.subset_size # if len(self.df) % self.subset_size != 0: # num_subsets += 1 # return num_subsets def _detect_faces(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._detect_landmarks_and_segment_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _extract_emotion_features(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._extract_emotion_features_from_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _path_to_detections(self): return Path(self.output_dir) / "detections" def _path_to_segmentations(self): return Path(self.output_dir) / "segmentations" def _path_to_landmarks(self): return Path(self.output_dir) / "landmarks" def _path_to_emotions(self): return Path(self.output_dir) / "emotions" def _get_emotion_net(self, device): net = get_emonet() net = net.to(device) return net, "emo_net" def _extract_emotion_features_from_subset(self, start_i, end_i): self._path_to_emotions().mkdir(parents=True, exist_ok=True) print(f"Processing subset {start_i // self.subset_size}") image_file_list = [] 
for i in auto.tqdm(range(start_i, end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] in_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + ".png") if in_detection_fname.is_file(): image_file_list += [in_detection_fname] transforms = Compose([ Resize((256, 256)), ]) batch_size = 32 dataset = UnsupervisedImageDataset(image_file_list, image_transforms=transforms, im_read='pil') loader = DataLoader(dataset, batch_size=batch_size, num_workers=4, shuffle=False) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) net, emotion_type = self._get_emotion_net(device) for i, batch in enumerate(auto.tqdm(loader)): # facenet_pytorch expects this stanadrization for the input to the net # images = fixed_image_standardization(batch['image'].to(device)) images = batch['image'].cuda() # start = time.time() with torch.no_grad(): out = net(images, intermediate_features=True) # end = time.time() # print(f" Inference batch {i} took : {end - start}") emotion_features = {key : val.detach().cpu().numpy() for key, val in out.items()} # start = time.time() for j in range(images.size()[0]): image_path = batch['path'][j] out_emotion_folder = self._path_to_emotions() / Path(image_path).parent.name out_emotion_folder.mkdir(exist_ok=True, parents=True) emotion_path = out_emotion_folder / (Path(image_path).stem + ".pkl") emotion_feature_j = {key: val[j] for key, val in emotion_features.items()} del emotion_feature_j['emo_feat'] # too large to be stored per frame = (768, 64, 64) del emotion_feature_j['heatmap'] # not too large but probably not usefull = (68, 64, 64) # we are keeping emo_feat_2 (output of last conv layer (before FC) and then the outputs of the FCs - expression, valence and arousal) save_emotion(emotion_path, emotion_feature_j, emotion_type) def _detect_landmarks_and_segment_subset(self, start_i, end_i): self._path_to_detections().mkdir(parents=True, exist_ok=True) 
self._path_to_segmentations().mkdir(parents=True, exist_ok=True) self._path_to_landmarks().mkdir(parents=True, exist_ok=True) detection_fnames = [] out_segmentation_folders = [] status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) completed = status_array[start_i // self.subset_size] if not completed: print(f"Processing subset {start_i // self.subset_size}") for i in auto.tqdm(range(start_i, end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] left = self.df.loc[i]["face_x"] top = self.df.loc[i]["face_y"] right = left + self.df.loc[i]["face_width"] bottom = top + self.df.loc[i]["face_height"] bb = np.array([top, left, bottom, right]) im_fullfile = Path(self.input_dir) / im_file try: detection, _, _, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(im_fullfile, detected_faces=[bb]) except Exception as e: # except ValueError as e: print(f"Failed to load file:") print(f"{im_fullfile}") print(traceback.print_exc()) continue # except SyntaxError as e: # print(f"Failed to load file:") # print(f"{im_fullfile}") # print(traceback.print_exc()) # continue out_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + self.processed_ext) # detection_fnames += [out_detection_fname.relative_to(self.output_dir)] out_detection_fname.parent.mkdir(exist_ok=True) detection_fnames += [out_detection_fname] if self.processed_ext in [".jpg", ".JPG"]: imsave(out_detection_fname, detection[0], quality=100) else: imsave(out_detection_fname, detection[0]) # out_segmentation_folders += [self._path_to_segmentations() / Path(im_file).parent] # save landmarks out_landmark_fname = self._path_to_landmarks() / Path(im_file).parent / (Path(im_file).stem + ".pkl") out_landmark_fname.parent.mkdir(exist_ok=True) # landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)] save_landmark(out_landmark_fname, landmarks[0], bbox_type) self._segment_images(detection_fnames, 
self._path_to_segmentations(), path_depth=1) status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r+', shape=(self.num_subsets,) ) status_array[start_i // self.subset_size] = True status_array.flush() del status_array print(f"Processing subset {start_i // self.subset_size} finished") else: print(f"Subset {start_i // self.subset_size} is already processed") @property def status_array_path(self): return Path(self.output_dir) / "status.memmap" @property def is_processed(self): status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) all_processed = status_array.all() return all_processed def prepare_data(self): pass # if self.use_processed: # if not self.status_array_path.is_file(): # print(f"Status file does not exist. Creating '{self.status_array_path}'") # self.status_array_path.parent.mkdir(exist_ok=True, parents=True) # status_array = np.memmap(self.status_array_path, # dtype=np.bool, # mode='w+', # shape=(self.num_subsets,) # ) # status_array[...] 
= False # del status_array # # all_processed = self.is_processed # if not all_processed: # self._detect_faces() # # # if self.ring_type == "emonet_feature": # self._prepare_emotion_retrieval() def _new_training_set(self, for_training=True): if for_training: im_transforms_train = create_image_augmenter(self.image_size, self.augmentation) if self.ring_type == "emonet_feature": prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' self._load_retrieval_arrays(prefix, feature_label) nn_indices = self.nn_indices_array nn_distances = self.nn_distances_array else: nn_indices = None nn_distances = None return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, im_transforms_train, ring_type=self.ring_type, ring_size=self.ring_size, load_emotion_feature=False, nn_indices_array=nn_indices, nn_distances_array= nn_distances, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, load_emotion_feature=True, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def setup(self, stage=None): self.training_set = self._new_training_set() self.validation_set = new_affewva(self.dataset_type)(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = new_affewva(self.dataset_type)(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) 
# if self.mode in ['all', 'manual']: # # self.image_list += sorted(list((Path(self.path) / "Manually_Annotated").rglob(".jpg"))) # self.dataframe = pd.load_csv(self.path / "Manually_Annotated" / "Manually_Annotated.csv") # if self.mode in ['all', 'automatic']: # # self.image_list += sorted(list((Path(self.path) / "Automatically_Annotated").rglob("*.jpg"))) # self.dataframe = pd.load_csv( # self.path / "Automatically_Annotated" / "Automatically_annotated_file_list.csv") def train_dataloader(self): if self.sampler == "uniform": sampler = None else: raise NotImplementedError() # elif self.sampler == "balanced_expr": # sampler = make_class_balanced_sampler(self.training_set.df["expression"].to_numpy()) # elif self.sampler == "balanced_va": # sampler = make_balanced_sample_by_weights(self.training_set.va_sample_weights) # elif self.sampler == "balanced_v": # sampler = make_balanced_sample_by_weights(self.training_set.v_sample_weights) # elif self.sampler == "balanced_a": # sampler = make_balanced_sample_by_weights(self.training_set.a_sample_weights) # else: # raise ValueError(f"Invalid sampler value: '{self.sampler}'") dl = DataLoader(self.training_set, shuffle=sampler is None, num_workers=self.num_workers, pin_memory=True, batch_size=self.train_batch_size, drop_last=self.drop_last, sampler=sampler) return dl def val_dataloader(self): return DataLoader(self.validation_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] def _get_retrieval_array(self, prefix, feature_label, dataset_size, feature_shape, feature_dtype, modifier='w+'): outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if outfile_name.is_file() and modifier != 'r': raise RuntimeError(f"The retrieval array already exists! 
'{outfile_name}'") shape = tuple([dataset_size] + list(feature_shape)) outfile_name.parent.mkdir(exist_ok=True, parents=True) array = np.memmap(outfile_name, dtype=feature_dtype, mode=modifier, shape=shape ) return array def _path_to_emotion_nn_indices_file(self, prefix, feature_label): nn_indices_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_indices.memmap") return nn_indices_file def _path_to_emotion_nn_distances_file(self, prefix, feature_label): nn_distances_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_distances.memmap") return nn_distances_file def _path_to_emotion_nn_retrieval_file(self, prefix, feature_label): outfile_name = Path(self.output_dir) / "cache" / (prefix + feature_label + ".memmap") return outfile_name def _load_retrieval_arrays(self, prefix, feature_label): # prefix = self.mode + "_train_" # if self.ignore_invalid: # prefix += "valid_only_" # feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) try: with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "rb") as f: indices_array_dtype = pkl.load(f) indices_array_shape = pkl.load(f) except: indices_array_dtype = np.int64, indices_array_shape = (len(dataset), NUM_NEIGHBORS) try: with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "rb") as f: distances_array_dtype = pkl.load(f) distances_array_shape = pkl.load(f) except: distances_array_dtype = np.float32, distances_array_shape = (len(dataset), NUM_NEIGHBORS) self.nn_indices_array = np.memmap(nn_indices_file, # dtype=np.int32, dtype=indices_array_dtype, mode="r", shape=indices_array_shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances_array_dtype, # dtype=np.float64, mode="r", shape=distances_array_shape ) def _prepare_emotion_retrieval(self): prefix = self.mode + "_train_" if 
self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) NUM_NEIGHBORS = 100 if nn_indices_file.is_file() and nn_distances_file.is_file(): print("Precomputed nn arrays found.") return dataset = self._new_training_set(for_training=False) dl = DataLoader(dataset, shuffle=False, num_workers=self.num_workers, batch_size=self.train_batch_size) array = None if self.ring_type != "emonet_feature": raise ValueError(f"Invalid ring type for emotion retrieval {self.ring_type}") outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if not outfile_name.is_file(): for bi, batch in enumerate(auto.tqdm(dl)): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] if array is None: array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype) # for i in range(feat.shape[0]): # idx = bi*self.train_batch_size + i array[bi*self.train_batch_size:bi*self.train_batch_size + feat.shape[0], ...] = feat del array else: print(f"Feature array found in '{outfile_name}'") for bi, batch in enumerate(dl): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] break array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype, modifier='r') nbrs = NearestNeighbors(n_neighbors=30, algorithm='auto', n_jobs=-1).fit(array) distances, indices = nbrs.kneighbors(array, NUM_NEIGHBORS) indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="w+", shape=indices.shape ) indices_array[...] = indices del indices_array distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="w+", shape=distances.shape ) distances_array[...] 
= distances del distances_array # save sizes a dtypes with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(indices.dtype, f) pkl.dump(indices.shape, f) with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(distances.dtype, f) pkl.dump(distances.shape, f) self.nn_indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="r", shape=indices.shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="r", shape=distances.shape ) class AfewVaDataVisTestModule(AfewVaDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setup(self, stage=None): self.training_set = None self.validation_set = TestSubsetAfewVa(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = TestSubsetAfewVa(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def val_dataloader(self): return DataLoader(self.validation_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] class AfewVa(EmotionalImageDatasetBase): def __init__(self, image_path, sample_list, image_size, scale = 1.4, transforms : imgaug.augmenters.Augmenter = None, use_gt_bb=True, bb_center_shift_x=0.0, bb_center_shift_y=0.0, ring_type=None, ring_size=None, load_emotion_feature=False, nn_indices_array=None, 
nn_distances_array=None, ext=".png", use_processed = None, normalize_va = None, ): self.sample_list = sample_list self.image_path = image_path self.image_size = image_size self.use_gt_bb = use_gt_bb # self.transforms = transforms or imgaug.augmenters.Identity() self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.scale = scale self.landmark_normalizer = KeypointNormalization() self.use_processed = use_processed or False self.normalize_va = normalize_va or True # normalize va to <-1,1> # self.ignore_invalid = ignore_invalid self.load_emotion_feature = load_emotion_feature self.nn_distances_array = nn_distances_array self.ext=ext # # if ignore_invalid: # # filter invalid classes # ignored_classes = [AffectNetExpressions.Uncertain.value, AffectNetExpressions.Occluded.value] # self.df = self.df[self.df["expression"].isin(ignored_classes) == False] # # self.df = self.df.drop(self.df[self.df["expression"].isin(ignored_classes)].index) # # # filter invalid va values # self.df = self.df[self.df.valence != -2.] # # self.df = self.df.drop(self.df.valence == -2.) # self.df = self.df[self.df.arousal != -2.] # # self.df = self.df.drop(self.df.arousal == -2.) # # valid_indices = np.logical_not(pd.isnull(self.df)) # # valid_indices = self.df.index # self.df = self.df.reset_index(drop=True) # # if nn_indices_array is not None and nn_indices_array.shape[0] != len(self.df): # # nn_indices_array = nn_indices_array[valid_indices, ...] # # if nn_distances_array is not None and nn_distances_array.shape[0] != len(self.df): # # nn_distances_array = nn_distances_array[valid_indices, ...] # # self.exp_weights = self.df["expression"].value_counts(normalize=True).to_dict() # self.exp_weight_tensor = torch.tensor([self.exp_weights[i] for i in range(len(self.exp_weights))], dtype=torch.float32) # self.exp_weight_tensor = 1. 
/ self.exp_weight_tensor # self.exp_weight_tensor /= torch.norm(self.exp_weight_tensor) self.size = 0 self.video_sizes_cumulative = [0] for vid_name, gt in self.sample_list.items(): self.size += len(gt.frames) self.video_sizes_cumulative += [self.size] if ring_type not in [None, "gt_expression", "gt_va", "emonet_feature", "emonet_va", "emonet_expression", "augment"]: raise ValueError(f"Invalid ring type '{ring_type}'") if ring_type == "emonet_expression" and ( nn_indices_array is None or nn_distances_array is None ): raise ValueError(f"If ring type set to '{ring_type}', nn files must be specified") self.ring_type = ring_type self.ring_size = ring_size # self._init_sample_weights() def _init_sample_weights(self): raise NotImplementedError() if self.ring_type == "gt_expression": grouped = self.df.groupby(['expression']) self.expr2sample = grouped.groups elif self.ring_type == "emonet_expression": raise NotImplementedError() else: self.expr2sample = None va = self.df[["valence", "arousal"]].to_numpy() sampling_rate = 0.1 # bin_1d = np.arange(-1.,1.+sampling_rate, sampling_rate) bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, y_ed, va_binnumber = sp.stats.binned_statistic_2d( va[:, 0], va[:, 1], None, 'count', [bin_1d, bin_1d], expand_binnumbers=False) va_weights = 1 / va_binnumber va_weights /= np.linalg.norm(va_weights) va_weights *= np.linalg.norm(np.ones_like(va_weights)) self.va_sample_weights = va_weights if self.ring_type == "gt_va": raise NotImplementedError() self.bins_to_samples = {} self.va_bin_indices = va_binnumber bin_indices = np.unique(va_binnumber) for bi in bin_indices: self.bins_to_samples[bi] = np.where(va_binnumber == bi)[0] elif self.ring_type == "emonet_va": raise NotImplementedError() else: self.bins_to_samples = {} if self.ring_type == "emonet_feature": raise NotImplementedError() if len(self) != self.nn_distances_array.shape[0] or len(self) != self.nn_indices_array.shape[0]: raise RuntimeError("The lengths of the dataset does not 
correspond to size of the nn_array. " "The sizes should be equal. Sth fishy is happening") # self.nn_indices_array = self.nn_indices_array self.nn_distances_array = nn_distances_array else: self.nn_indices_array = None self.nn_distances_array = None # v = self.df[["valence"]].to_numpy() sampling_rate = 0.1 bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, va_binnumber = sp.stats.binned_statistic( va[:, 0], None, 'count', bin_1d) v_weights = 1 / va_binnumber v_weights /= np.linalg.norm(v_weights) v_weights *= np.linalg.norm(np.ones_like(v_weights)) self.v_sample_weights = v_weights bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, va_binnumber = sp.stats.binned_statistic( va[:, 1], None, 'count', bin_1d) a_weights = 1 / va_binnumber a_weights /= np.linalg.norm(a_weights) a_weights *= np.linalg.norm(np.ones_like(a_weights)) self.a_sample_weights = a_weights def __len__(self): # return 100 return self.size def _load_image(self, index): vid_index = bisect.bisect_right(self.video_sizes_cumulative, index) - 1 vid_first_im_index = self.video_sizes_cumulative[vid_index] vid_item = list(self.sample_list.items())[vid_index] vid_name = vid_item[0] vid_gt = vid_item[1] assert vid_gt.video_id == vid_name vid_frame_list = sorted(list(vid_gt['frames'].keys())) selected_frame = vid_frame_list[index - vid_first_im_index] im_rel_path = Path(vid_name) / (selected_frame + self.ext) im_file = Path(self.image_path) / im_rel_path im_file = im_file.parent / (im_file.stem + self.ext) input_img = imread(im_file) # # scale_factor_x = 1.48 # scale_factor_x = 1.25 # # scale_factor_x = 1 # input_img = resize(input_img, (432, 720, 1)) input_img = resize(input_img, (576, 960)) scale_factor_x = 720 / 960 valence = vid_gt['frames'][selected_frame]['valence'] arousal = vid_gt['frames'][selected_frame]['arousal'] facial_landmarks = np.array(vid_gt['frames'][selected_frame]['landmarks']) facial_landmarks[:,0] /= scale_factor_x if self.normalize_va: valence /= 10. arousal /= 10. 
return input_img, facial_landmarks, valence, arousal, im_file def _load_additional_data(self, im_rel_path): return {} def _get_sample(self, index): num_fails = 0 max_fails = 50 try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) except Exception as e: # if the image is corrupted or missing (there is a few :-/), find some other one while True: num_fails += 1 if num_fails >= max_fails: # something must have gone serious wrong. Nothing loads, so throw an exception raise e index += 1 index = index % len(self) try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) success = True except Exception as e2: success = False if success: break left = facial_landmarks[:,0].min() top = facial_landmarks[:,1].min() right = facial_landmarks[:,0].max() bottom = facial_landmarks[:,1].max() input_img_shape = input_img.shape if not self.use_processed: # Use AffectNet as is provided (their bounding boxes, and landmarks, no segmentation) old_size, center = bbox2point(left, right, top, bottom, type='kpt68') # old_size, center = bbox2point(left, right, top, bottom, type='bbox') size = int(old_size * self.scale) img, landmark = bbpoint_warp(input_img, center, size, self.image_size, landmarks=facial_landmarks) img *= 255. if not self.use_gt_bb: raise NotImplementedError() # landmark_type, landmark = load_landmark( # self.path_prefix / self.landmark_list[index]) landmark = landmark[np.newaxis, ...] seg_image = None else: # use AffectNet processed by me. 
I used their bounding boxes (to not have to worry about detecting # the correct face in case there's more) and I ran our FAN and segmentation over it img = input_img # the image has already been cropped in preprocessing (make sure the input root path # is specificed to the processed folder and not the original one landmark_path = Path(self.image_path).parent / "landmarks" / im_rel_path landmark_path = landmark_path.parent / (landmark_path.stem + ".pkl") landmark_type, landmark = load_landmark( landmark_path) landmark = landmark[np.newaxis, ...] segmentation_path = Path(self.image_path).parent / "segmentations" / im_rel_path segmentation_path = segmentation_path.parent / (segmentation_path.stem + ".pkl") seg_image, seg_type = load_segmentation( segmentation_path) seg_image = seg_image[np.newaxis, :, :, np.newaxis] seg_image = process_segmentation( seg_image, seg_type).astype(np.uint8) if self.load_emotion_feature: emotion_path = Path(self.image_path).parent / "emotions" / im_rel_path emotion_path = emotion_path.parent / (emotion_path.stem + ".pkl")
emotion_features, emotion_type = load_emotion(emotion_path)
2
2023-11-07 20:13:32+00:00
16k
hxz393/ConfigCenterComparer
ui/action_start.py
[ { "identifier": "COL_INFO", "path": "config/settings.py", "snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\...
import logging from typing import Dict, List from PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction, QHeaderView from config.settings import COL_INFO from lib.get_resource_path import get_resource_path from module.execute_queries import execute_queries from ui.config_manager import ConfigManager from ui.filter_bar import FilterBar from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
13,377
""" 提供应用程序的主要功能,包括用户界面初始化、数据库查询执行、数据展示和处理。 本模块中包含的类负责应用程序的主要操作流程,如用户界面的初始化、按钮动作的处理、后台数据查询、数据展示等。主要类包括`ActionStart`和`StartWork`,分别负责处理用户界面动作和执行后台工作。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionStart(QObject): """ 负责处理用户界面动作,例如初始化界面、响应按钮点击等。 此类包含了界面的主要动作逻辑,如开始按钮的点击处理、用户界面语言的更新、表格的数据填充等。它与后台线程`StartWork`协作,实现数据的查询和展示。 :param lang_manager: 语言管理器,用于界面语言的加载和更新。 :param config_manager: 配置管理器,提供应用程序的配置信息。 :param table: 主表格界面,用于数据的展示。 :param filter_bar: 过滤条,用于数据的筛选。 :type lang_manager: LangManager :type config_manager: ConfigManager :type table: TableMain :type filter_bar: FilterBar """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager,
""" 提供应用程序的主要功能,包括用户界面初始化、数据库查询执行、数据展示和处理。 本模块中包含的类负责应用程序的主要操作流程,如用户界面的初始化、按钮动作的处理、后台数据查询、数据展示等。主要类包括`ActionStart`和`StartWork`,分别负责处理用户界面动作和执行后台工作。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionStart(QObject): """ 负责处理用户界面动作,例如初始化界面、响应按钮点击等。 此类包含了界面的主要动作逻辑,如开始按钮的点击处理、用户界面语言的更新、表格的数据填充等。它与后台线程`StartWork`协作,实现数据的查询和展示。 :param lang_manager: 语言管理器,用于界面语言的加载和更新。 :param config_manager: 配置管理器,提供应用程序的配置信息。 :param table: 主表格界面,用于数据的展示。 :param filter_bar: 过滤条,用于数据的筛选。 :type lang_manager: LangManager :type config_manager: ConfigManager :type table: TableMain :type filter_bar: FilterBar """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager,
table: TableMain,
7
2023-11-07 01:02:38+00:00
16k
pytorch-labs/ao
test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetr...
import copy import unittest import torch import torch.nn as nn import os from torch._inductor.utils import run_and_get_code from torch._dynamo import config from torch.ao.quantization import MinMaxObserver, QConfigMapping from torchao.quantization.dynamic_quant import ( DynamicallyPerAxisQuantizedLinear, ) from torchao.quantization.quant_api import ( apply_dynamic_quant, apply_weight_only_int8_quant, change_linear_weights_to_int8_dqtensors, change_linear_weights_to_int8_woqtensors, change_linear_weights_to_int4_woqtensors, _replace_with_custom_fn_if_matches_filter, ) from torchao.quantization.quant_primitives import ( dequantize_per_channel, dequantize_per_tensor, dynamically_quantize_per_channel, dynamically_quantize_per_tensor, quant_int8_dynamic_linear, quant_int8_dynamic_per_token_linear, quantize_activation_per_token_absmax, safe_int_mm, ) from torchao.quantization.smoothquant import ( get_scale, smooth_fq_linear_to_inference, SmoothFakeDynamicallyQuantizedLinear, swap_linear_with_smooth_fq_linear, ) from torchao.quantization.subclass import ( Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, Int4WeightOnlyQuantizedLinearWeight ) from torchao.quantization.utils import ( _apply_logging_hook, compute_error, compute_error as SQNR, _fqn_to_op_to_shape_to_count, LoggingTensorMode, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx from transformers import ( # type: ignore[import-untyped] DistilBertModel, DistilBertTokenizer, )
11,332
qconfig_mapping = QConfigMapping().set_global(qconfig) lin_ref_p = prepare_fx(lin_ref, qconfig_mapping, (torch.randn(1, 1),)) lin_ref_q = convert_to_reference_fx(lin_ref_p) y_q_ref = lin_ref_q(x.float()) # scale, zp of weight (get from reference model) w_obs = qconfig.weight() w_obs(weight) lin_ref_w_scale, lin_ref_w_zp = w_obs.calculate_qparams() lin_ref_w_scale = lin_ref_w_scale.to(device).to(float_dtype) # print('lin_ref_w', 'scale', lin_ref_w_scale, 'zp', lin_ref_w_zp) w_vals, _s, _z = dynamically_quantize_per_channel( getattr(lin_ref_q, "0").weight.to(float_dtype), -128, 127, torch.int8 ) w_vals = w_vals.t().contiguous() w_vals_sums = w_vals.sum(dim=0) # do our version of the quantized linear operator y = quant_int8_dynamic_linear( x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. 
test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater(
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # mypy: ignore-errors torch.manual_seed(0) config.cache_size_limit = 100 class SmoothquantUnitTest(unittest.TestCase): # first, let's reproduce the graphic from the paper, Figure 4, to ensure # we are calculating the scales correctly def test_figure_4(self): X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4) W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3) X_mul_W = torch.matmul(X, W) smoothquant_scale = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) # reproduce scaled calculation X_scaled = X / smoothquant_scale.reshape(1, 1, -1) W_scaled = torch.matmul(torch.diag(smoothquant_scale), W) X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled) assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!" assert X_mul_W.shape == X_scaled_mul_scaled_W.shape # next, run the above test on a sample of representative inputs def test_tensors(self): x_shape = (1, 5, 7) w_shape = (7, 9) for i in range(3): X = torch.randn(x_shape) * 10 W = torch.randn(w_shape) s = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) Y = torch.matmul(X, W) Y_ref = torch.matmul( X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W), ) assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!" 
def _test_smooth_linear_impl(self, x_shape, lin_shape, device): # so we can use the full range torch.backends.quantized.engine = "qnnpack" x = torch.randn(*x_shape, device=device) * 9 + 10 lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore activation=None, weight=torch.ao.quantization.default_per_channel_weight_observer, ) lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float( lin_fp32_copy.cpu() ) y_ref = lin_fp32(x) # calibrate the smoothquant versions y_smooth_nocalib = lin_smooth(x) _ = lin_smooth_skip_scaling(x) lin_smooth.to_inference() lin_smooth_skip_scaling.debug_skip_scaling = True lin_smooth_skip_scaling.to_inference() # verify that with scaling turned off, numerics match quantized version y_smooth_fq_only = lin_smooth_skip_scaling(x) y_smooth_fq = lin_smooth(x) y_dynamic_q = lin_dynamic_q(x.cpu()).to(device) # print('y_ref', y_ref) # print('y_smooth_nocalib', y_smooth_nocalib) # print('y_smooth_fq', y_smooth_fq) # print('y_smooth_fq_only', y_smooth_fq_only) # print('y_dynamic_q', y_dynamic_q) sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq) sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q) sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q) # print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq) assert torch.allclose( y_ref, y_smooth_nocalib ), "y_ref not close to y_smooth_nocalib" # after https://github.com/pytorch-labs/ao_benchmarks/pull/32, # numerics do not match exactly between production c++ code # and this Python code # assert torch.allclose( # y_smooth_fq_only, y_dynamic_q, # atol=torch.max(y_smooth_fq_only).item()*0.01, # rtol=0.00001), \ 
# 'y_smooth_fq_only not close to y_dynamic_q' self.assertTrue(sqnr_smooth_fq.item() >= 40.0) self.assertTrue(sqnr_dynamic_q.item() >= 40.0) self.assertTrue(sqnr_fq.item() >= 40.0) def test_smooth_linear_cpu(self): self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu") def test_smooth_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda") def test_smooth_linear_edge_cases(self): # so we can use the full range torch.backends.quantized.engine = "qnnpack" lin_fp32 = nn.Linear(3, 4) lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( lin_fp32, alpha=0.25 ) # test different ranks x0 = torch.randn(4, 5, 3) x1 = torch.randn(1, 8, 5, 3) x2 = torch.randn(2, 3, 7, 5, 3) # calibrate _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) # inference lin_smooth.to_inference() _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) def test_swap(self): m = nn.Sequential( nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)), nn.Linear(4, 4), ) m_copy = copy.deepcopy(m) swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) # verify all linears are swapped assert isinstance(m_copy[0][0], SmoothFakeDynamicallyQuantizedLinear) assert isinstance(m_copy[0][1], nn.ReLU) # this one was skipped assert isinstance(m_copy[0][2], nn.Linear) assert isinstance(m_copy[1], SmoothFakeDynamicallyQuantizedLinear) # verify results do not change without smoothing x = torch.randn(4, 4) y_ref = m(x) y = m_copy(x) assert torch.allclose(y_ref, y) def test_weight_t_and_non_t_numerics_match(self): # verify that numerics match whether weight is stored # in transposed format (for cuBLAS) vs non-transposed format # (for torch.compile) if not torch.cuda.is_available(): print("no cuda, skip") return dtype = torch.half device = "cuda" lin_ref = nn.Linear(32, 16, dtype=dtype, device=device) lin_eager_t = copy.deepcopy(lin_ref) lin_opt_t = copy.deepcopy(lin_eager_t) lin_opt = 
copy.deepcopy(lin_eager_t) lin_eager_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_eager_t) lin_opt_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt_t) lin_opt = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt) lin_opt.store_w_int_repr_t = False x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != 
"1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], 
device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again x_dq = dequantize_per_tensor(y_vals, y_scale, y_zero_point, float_dtype) y_ref_dq = y_ref.dequantize().to(float_dtype) if float_dtype == torch.float: torch.testing.assert_close(x_dq, y_ref_dq) else: sqnr = compute_error(x_dq, y_ref_dq) self.assertTrue(sqnr.item() > 45.0) def test_dynamic_quant_per_tensor_numerics_cpu(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu test_cases = ( ( 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def test_dynamic_quant_per_tensor_numerics_cuda(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def _test_dynamic_quant_per_channel_numerics_impl( self, qmin, qmax, 
int_dtype, qint_dtype, float_dtype, device ): # verifies that dynamic quant per channel in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu x = torch.randn(16, 32, device=device, dtype=float_dtype) y_vals, y_scale, y_zero_point = dynamically_quantize_per_channel( x, qmin, qmax, int_dtype ) min_val, max_val = torch.aminmax(x, dim=1) # reference weight_obs = torch.ao.quantization.MovingAveragePerChannelMinMaxObserver( dtype=qint_dtype, quant_min=qmin, quant_max=qmax, qscheme=torch.per_channel_symmetric, averaging_constant=1.0, # make it ignore previous iterations ) weight_obs(x) y_ref_scale, y_ref_zp = weight_obs.calculate_qparams() y_ref_scale = y_ref_scale.to(device) y_ref_zp = y_ref_zp.to(device) # quantize_per_channel doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x y_ref = torch.quantize_per_channel( x_for_ref, y_ref_scale, y_ref_zp, 0, qint_dtype ) torch.testing.assert_close( y_scale, y_ref.q_per_channel_scales().to(float_dtype) ) assert torch.equal(y_zero_point, y_ref.q_per_channel_zero_points()) # this test case has one element where the rounding is off by one # from Python-only code vs the c++ code, it's easy to repro with # various shapes. 
# Discussion here is relevant: https://github.com/pytorch/pytorch/issues/16498 # TODO(future): figure out what to do about this # assert torch.equal(int_vals, q_reference.int_repr()) assert torch.max(torch.abs(y_vals - y_ref.int_repr())) <= 1 # dequantize x_dq = dequantize_per_channel(y_vals, y_scale, y_zero_point) x_ref_dq = y_ref.dequantize() # off-by-one for scale is okay torch.testing.assert_close( x_dq, x_ref_dq, atol=torch.max(y_scale).item() * 1.01, rtol=0.0001 ) def test_dynamic_quant_per_channel_numerics_cpu(self): test_cases = ((-128, 127, torch.int8, torch.qint8, torch.float32, "cpu"),) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def test_dynamic_quant_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( (-128, 127, torch.int8, torch.qint8, torch.float32, "cuda"), (-128, 127, torch.int8, torch.qint8, torch.float16, "cuda"), ) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def _test_quantize_per_token_impl(self, device, dtype): x = torch.randn(3, 3, 3, device=device, dtype=dtype) xq, scales = quantize_activation_per_token_absmax(x) x_dq = dequantize_per_tensor(xq, scales, None).to(x.dtype) sqnr = compute_error(x, x_dq) self.assertTrue(sqnr >= 45.0) def test_quantize_per_token_cpu(self): for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cpu", dtype) def test_quantize_per_token_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cuda", dtype) def _test_per_token_linear_impl(self, device, dtype): x = torch.randn(2, 16, 8, device=device, dtype=dtype) w = torch.randn(16, 8, device=device, dtype=dtype) wq, w_scales, _w_zp = dynamically_quantize_per_channel(w, -127, 127, torch.int8) # Note: need to make the weight contiguous because we are # testing in eager mode and 
cuBlas will not give correct results # for a transposed weight y = quant_int8_dynamic_per_token_linear( x, wq.t().contiguous(), w_scales, None, dtype ) y_ref = torch.matmul(x, w.t()) sqnr = compute_error(y_ref, y) self.assertTrue(sqnr >= 42.0) def test_per_token_linear_cpu(self): for dtype in (torch.float32,): self._test_per_token_linear_impl("cpu", dtype) def test_per_token_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_per_token_linear_impl("cuda", dtype) def test__int_mm(self): # TODO(future): figure out what here needs to move to PT core, # if it's not already tested there if not torch.cuda.is_available(): print("no cuda, skip") return m, k, n = 32, 32, 16 x = torch.randint(-128, 127, (m, k), dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, (k, n), dtype=torch.int8, device="cuda") y_ref = torch.matmul(x.float(), w.float()).to(torch.int32) y_raw = safe_int_mm(x, w) wrap_in_mm_opt = torch.compile(safe_int_mm, mode="max-autotune") # note: triton chokes on the line below on k == 8 and n == 8 with # https://www.internalfb.com/phabricator/paste/view/P683467944 # TODO(future): file an issue y_opt = wrap_in_mm_opt(x, w) torch.testing.assert_close(y_ref, y_raw, atol=0, rtol=0) torch.testing.assert_close(y_ref, y_opt, atol=0, rtol=0) def test__int_mm_eager_and_torch_compile_numerics(self): if not torch.cuda.is_available(): print("no cuda, skip") return def __int_mm_ref(x, w): x = x.cpu().to(torch.int32) w = w.cpu().to(torch.int32) y = torch.matmul(x, w) return y.cuda() shapes = ( # minimal test shape ((1, 32, 32), (32, 16)), # paste of real linear shapes from LLaMa 1.5b ((17, 1, 1536), (1536, 1536)), ((17, 8, 4096), (4096, 1536)), ((17, 1, 1536), (1536, 4096)), ((17, 8, 1536), (1536, 1536)), ((17, 1, 4096), (4096, 1536)), ((17, 8, 1536), (1536, 4096)), ) for x_shape, w_shape in shapes: def wrap_torch_int_mm(x, w): b, n, k = x.shape k, m = w.shape x = 
x.reshape(b * n, k) res = safe_int_mm(x, w) res = res.reshape(b, n, m) return res wrap_torch_int_mm_opt = torch.compile( wrap_torch_int_mm, mode="max-autotune" ) x = torch.randint(-128, 127, x_shape, dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, w_shape, dtype=torch.int8, device="cuda") z_ref = __int_mm_ref(x, w) z_eager = wrap_torch_int_mm(x, w) z_torch_compile = wrap_torch_int_mm_opt(x, w) # print(z_ref) # print(z_eager) # print(z_torch_compile) torch.testing.assert_close(z_ref, z_eager, atol=0, rtol=0) torch.testing.assert_close(z_ref, z_torch_compile, atol=0, rtol=0) def _test_qlinear_per_channel_numerics( self, x_shape, lin_shape, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): qconfig = torch.ao.quantization.per_channel_dynamic_qconfig x = torch.randn(*x_shape, device=device, dtype=float_dtype) # TODO: test bias true and false # Note: reference path only works on float because lack of aten quant primitives # support of half, so we cast back and forth to emulate lin_ref = ( nn.Sequential(nn.Linear(*lin_shape)) .eval() .to(float_dtype) .float() .to(device) ) y_ref = lin_ref(x.float()) weight = lin_ref[0].weight bias = lin_ref[0].bias qconfig_mapping = QConfigMapping().set_global(qconfig) lin_ref_p = prepare_fx(lin_ref, qconfig_mapping, (torch.randn(1, 1),)) lin_ref_q = convert_to_reference_fx(lin_ref_p) y_q_ref = lin_ref_q(x.float()) # scale, zp of weight (get from reference model) w_obs = qconfig.weight() w_obs(weight) lin_ref_w_scale, lin_ref_w_zp = w_obs.calculate_qparams() lin_ref_w_scale = lin_ref_w_scale.to(device).to(float_dtype) # print('lin_ref_w', 'scale', lin_ref_w_scale, 'zp', lin_ref_w_zp) w_vals, _s, _z = dynamically_quantize_per_channel( getattr(lin_ref_q, "0").weight.to(float_dtype), -128, 127, torch.int8 ) w_vals = w_vals.t().contiguous() w_vals_sums = w_vals.sum(dim=0) # do our version of the quantized linear operator y = quant_int8_dynamic_linear( x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, 
float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater(
SQNR(w, lin.weight.dequantize()),
21
2023-11-03 21:27:36+00:00
16k
Zaczero/openstreetmap-ng
src/controllers/api/0.6/changeset_comment.py
[ { "identifier": "api_user", "path": "src/lib/auth.py", "snippet": "def api_user(*require_scopes: Scope | ExtendedScope) -> User:\n \"\"\"\n Dependency for authenticating the api user.\n \"\"\"\n\n return Security(\n _get_user,\n scopes=tuple(s.value for s in require_scopes),\n ...
from typing import Annotated from fastapi import APIRouter, Form from pydantic import PositiveInt from src.lib.auth import api_user from src.lib.format.format06 import Format06 from src.limits import CHANGESET_COMMENT_BODY_MAX_LENGTH from src.models.db.user import User from src.models.scope import ExtendedScope, Scope from src.services.changeset_comment_service import ChangesetCommentService
11,085
router = APIRouter() @router.post('/changeset/{changeset_id}/subscribe') async def changeset_subscribe( changeset_id: PositiveInt, _: Annotated[User, api_user(Scope.write_api)], ) -> dict: changeset = await ChangesetCommentService.subscribe(changeset_id)
router = APIRouter() @router.post('/changeset/{changeset_id}/subscribe') async def changeset_subscribe( changeset_id: PositiveInt, _: Annotated[User, api_user(Scope.write_api)], ) -> dict: changeset = await ChangesetCommentService.subscribe(changeset_id)
return Format06.encode_changeset(changeset)
1
2023-11-04 01:12:13+00:00
16k
codefuse-ai/Collinear-Constrained-Attention
train/trainer/atorch_trainer.py
[ { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size(...
import datetime import json import logging import math import os import random import re import shutil import time import warnings import gc import numpy as np import atorch import torch from functools import partial from pathlib import Path from deepspeed.ops.adam import DeepSpeedCPUAdam from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm.auto import tqdm from transformers import get_scheduler as get_scheduler_trans from transformers.modeling_utils import PreTrainedModel, unwrap_model from transformers.trainer import ( OPTIMIZER_NAME, SCHEDULER_NAME, TRAINER_STATE_NAME, TRAINING_ARGS_NAME ) from transformers.trainer_pt_utils import reissue_pt_warnings from transformers.trainer_utils import ( PREFIX_CHECKPOINT_DIR, ) from transformers.utils import WEIGHTS_NAME from torch.nn import CrossEntropyLoss from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus from atorch.auto import auto_accelerate from atorch.utils.version import torch_version from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP from model.glm.modeling_glm import GLMBlock from torch.cuda.amp import GradScaler from apex.optimizers import FusedSGD from model.peft.modeling_peft import PeftModel
11,792
losses = torch.cat(losses) losses = losses[: len(self.valid_dataset)] mean_loss = torch.mean(losses).item() accumulated_task_loss = torch.tensor(accumulated_task_loss_np).to(self.device) accumulated_task_num = torch.tensor(accumulated_task_num_np).to(self.device) torch.distributed.all_reduce(accumulated_task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(accumulated_task_num, op=torch.distributed.ReduceOp.SUM) accumulated_task_loss /= torch.distributed.get_world_size() valid_task_loss = accumulated_task_loss / (accumulated_step - 1) logs = {'valid_loss': mean_loss} per_task_valid_loss = {self.ID2TASK[i]+'_loss': valid_task_loss[i].item() for i in range(len(self.ID2TASK))} logs.update(per_task_valid_loss) if is_global_main_process(): logger.info('log point') for i in range(len(self.ID2TASK)): if accumulated_task_num[i] != 0: logger.info(f"{self.ID2TASK[i]}_loss: {valid_task_loss[i]}, sample nums: {accumulated_task_num[i]}") self.log(logs, step=self.global_steps, phase='Evaluation') metrics = {'valid_loss': mean_loss, 'valid_task_loss': valid_task_loss} logger.info(f"Finish evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) return metrics def log(self, logs, step, phase='Train'): if not self.summary_writer: return logger.info(json.dumps(logs)) for key, value in logs.items(): self.summary_writer.add_scalar(f'{phase}/{key}', value, step) def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)', use_mtime=False ): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob( f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append( (os.path.getmtime(path), path)) else: regex_match = re.search( f".*{checkpoint_prefix}-({checkpoint_name_pattern})", path) if regex_match is not None and regex_match.groups() is not None: 
ordering_and_checkpoint_path.append( (int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.best_model_checkpoint))) # for i in range(best_model_index, len(checkpoints_sorted) - 2): for i in range(best_model_index, len(checkpoints_sorted) - 1): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] print_rank_0(f'checkpoints sorted list: {checkpoints_sorted}') return checkpoints_sorted def _rotate_checkpoints( self, use_mtime=False, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='.*') -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern=checkpoint_name_pattern) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
save_total_limit = self.args.save_total_limit number_of_checkpoints_to_delete = max( 0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def _clean_atorch_checkpoints(self, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR): # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern='([0-9]+)') # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. for checkpoint in checkpoints_sorted[:-1]: logger.info( f"Deleting older atorch checkpoint [{checkpoint}] due to self.args.save_total_limit") try: os.remove(os.path.join(checkpoint, ATORCH_CHECKPOINT_NAME)) except Exception: continue def _save_peft_model(self, output_dir, state_dict=None): logger.info(f"Start saving peft model to {output_dir}") output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) model = unwrap_model(self.model)
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. HYPER_PARAMETER_NAME = 'hyper_parameters.json' ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin' EPOCH_CHECKPOINT_NAME = 'epoch' FAMO_CHECKPOINT_NAME = 'famo_checkpoint' EMA_CHECKPOINT_NAME = 'ema_checkpoint' # logger = logging.getLogger(__name__) def is_local_main_process(): return atorch.local_rank() == 0 def is_global_main_process(): return atorch.rank() == 0 def has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. 
if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def count_model_params(model): trainable_params = 0 all_params = 0 for param in model.parameters(): num_params = param.numel() all_params += num_params if param.requires_grad: trainable_params += num_params return all_params, trainable_params class AtorchArguments: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): def lr_lambda(current_step: int): inverse_log_warm_up = 1.0 / math.log(num_warmup_steps) if current_step == 0: return 0.0 if current_step < num_warmup_steps: return inverse_log_warm_up * math.log(current_step) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps): scheduler_map = { 'log_warmup_linear_decay': get_linear_schedule_with_log_warmup} try: lr_scheduler = get_scheduler_trans( name, optimizer, num_warmup_steps, num_training_steps) return lr_scheduler except Exception: schedule_func = scheduler_map[name] return schedule_func(optimizer, num_warmup_steps, num_training_steps) class AtorchTrainer: def __init__(self, model, args, train_dataset, valid_dataset, tokenizer=None, callbacks=None, no_save_atorch_checkpoint=None, save_pytorch_model_bin_checkpoint=True, train_peft=False, rank=0, max_shard_size='10GB', files_to_save=None, args_to_save=None, data_collator=None, my_loss_func=None, **kwargs, ): self.args = args self.TASK2ID = TASK2ID self.ID2TASK = ID2TASK print('in atorch trainer') print(TASK2ID) print(ID2TASK) self.model = model self.no_save_atorch_checkpoint = no_save_atorch_checkpoint self.save_pytorch_model_bin_checkpoint = 
save_pytorch_model_bin_checkpoint self.train_peft = train_peft self.rank = rank self.kwargs = kwargs self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.tokenizer = tokenizer self.max_shard_size = max_shard_size self.files_to_save = files_to_save self.args_to_save = args_to_save self.best_metric = None self.best_model_checkpoint = None self.no_save_base_model = True self.device = f"cuda:{atorch.local_rank()}" self.famo = FAMO(n_tasks=len(TASK2ID), device=self.device, mode=self.args.weighted_loss_mode) self.famo_resume = False self.selfpaced_status = SelfPacedStatus(args.selfpaced_interval) self.total_train_batch_size = self.args.per_device_train_batch_size * \ self.args.gradient_accumulation_steps * \ atorch.world_size() self.data_collator = data_collator self.my_loss_func = my_loss_func if self.args.early_stopping_patience > 0: print(f'early_stopping_patience: {self.args.early_stopping_patience}') patience = self.args.early_stopping_patience self.early_stopping = EarlyStopping(patience, verbose=True) self.train_dataloader_args = { "shuffle": True, "batch_size": self.total_train_batch_size, "pin_memory": True, "collate_fn": data_collator, "drop_last": True, "num_workers": self.args.num_workers, # "persistent_workers": args.num_workers > 0, } self.valid_dataloader = DataLoader( valid_dataset, sampler=DistributedSampler(valid_dataset, shuffle=True), batch_size=args.per_device_valid_batch_size, pin_memory=True, collate_fn=data_collator ) self.valid_dataloader_length = len(self.valid_dataloader) if self.args.resume_from_checkpoint == 'true': self.resume_checkpoint_dir = self.get_last_checkpoint( self.args.output_dir) self.atorch_args = AtorchArguments( lr=args.learning_rate, weight_decay=args.weight_decay, adam_eps=args.adam_epsilon, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2) self.atorch_init() self.num_update_steps_per_epoch = math.ceil( len(self.train_dataloader) / self.args.gradient_accumulation_steps) print(f'number of update 
steps per epoch: {self.num_update_steps_per_epoch}') if self.args.max_steps == -1: self.args.max_steps = int( self.args.num_train_epochs * self.num_update_steps_per_epoch) else: self.args.num_train_epochs = math.ceil( self.args.max_steps / self.num_update_steps_per_epoch) # self.args.warmup_steps = self.args.get_warmup_steps( # self.args.max_steps) # 找不到get_warmup_steps custom_lr_scheduler_type = self.kwargs.get( 'custom_lr_scheduler_type', None) self.lr_scheduler = get_scheduler( name=custom_lr_scheduler_type if custom_lr_scheduler_type else self.args.lr_scheduler_type, optimizer=self.optimizer, num_warmup_steps=self.args.num_warmup_steps, num_training_steps=self.args.max_steps, ) print_rank_0(f'lr_scheduler{self.lr_scheduler}') if self.args.resume_from_checkpoint == 'true': with warnings.catch_warnings(record=True): self.lr_scheduler.load_state_dict(torch.load( os.path.join(self.resume_checkpoint_dir, SCHEDULER_NAME))) self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): 
logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." 
) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') 
print_rank_0(f'first_train_step: {self.famo.first_train_step}') print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP) wrap_class = (LlamaDecoderLayer,) elif self.args.model_type == 'glm': wrap_class = (GLMBlock,) parallel_mode = [] if self.args.dp: # p_mode = ([("data", torch.distributed.get_world_size())], None) parallel_mode.append(("data", self.args.dp)) if self.args.tp: parallel_mode.append(("tensor_parallel", self.args.tp)) strategy = [ # ("parallel_mode", p_mode), ("parallel_mode", (parallel_mode, None)), "module_replace", # ("fsdp", fsdp_config), # ("amp_native", {"dtype": torch.bfloat16}) if self.args.bf16 else "amp_native", # ("checkpoint", wrap_class), ] if self.args.peft_type is None or self.args.peft_type == 'lora': cpu_offload = False if self.args.total_model_param < 1e9 else True fsdp_config = { "atorch_wrap_cls": wrap_class, "sync_module_states": True, "use_orig_params": True, "limit_all_gathers": True, # "cpu_offload": True, } print(fsdp_config) fsdp_opt = ("fsdp", fsdp_config) strategy.append(fsdp_opt) self.args.atorch_opt = "fsdp" else: num_all_params, num_trainable_params = count_model_params(self.model) if num_all_params < 11e9 or self.args.peft_type == "qlora": # For GLM-10B logger.info( f"Found using {self.args.peft_type} method. The peft model has {num_all_params} and only " f"{num_trainable_params} params are trainable({100 * num_trainable_params / num_all_params}%)" ". 
Set atorch opt to DistributedDataParallel.") self.args.atorch_opt = "ddp" if self.args.bf16 or self.args.fp16: if self.args.bf16: amp_config = {"dtype": torch.bfloat16, "skip_if_nonfinite": True} # amp_config = {"dtype": torch.bfloat16} if self.args.peft_type == "qlora": # The dtype of grads is bf16 when using qlora # atorch scaler does not support bf16 grads. amp_config["skip_if_nonfinite"] = False elif self.args.fp16: amp_config = {"dtype": torch.float16} strategy.append(("amp_native", amp_config)) # strategy.append(("half", "bf16")) if self.args.checkpoint_activations: strategy.append(("checkpoint", wrap_class)) print(f"Manually loaded auto acc strategy: {strategy}") def prepare_input(batch, device): # DEBUG: GLM NoneType batch = {k: v.to(device=device, non_blocking=True) if v is not None else None for k, v in batch.items()} return batch def optim_param_func(model, args): no_decay = ["bias", "LayerNorm.weight", "layernorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] return optimizer_grouped_parameters # load fsdp checkpoint参数 if self.args.resume_from_checkpoint == 'true': logger.info(f'Resume training from {self.resume_checkpoint_dir}') if self.is_rank0(): sd = torch.load(os.path.join( self.resume_checkpoint_dir, ATORCH_CHECKPOINT_NAME), map_location='cpu') model_state_dict, optim_state_dict = sd['model_state_dict'], sd['optimizer_state_dict'] else: model_state_dict, optim_state_dict = None, None torch.distributed.barrier() # other rank waiting ########## self.load_atorch_model_state(model_state_dict) ########## if self.is_rank0(): print(f'GPU mem before fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) optim_func = torch.optim.AdamW print(f'optimizer before fsdp: {optim_func}') 
ddp_find_unused_parameters = None if self.args.atorch_opt == "ddp" and not (self.args.peft_type in ["lora", "qlora"] and self.args.checkpoint_activations): ddp_find_unused_parameters = True status, result, best_strategy = auto_accelerate( self.model, optim_func, self.train_dataset, dataloader_args=self.train_dataloader_args, loss_func=self.my_loss_func, prepare_input=prepare_input, optim_args={ "lr": self.atorch_args.lr, "weight_decay": self.atorch_args.weight_decay, "eps": self.atorch_args.adam_eps, "betas": (self.atorch_args.adam_beta1, self.atorch_args.adam_beta2), }, optim_param_func=partial( optim_param_func, args=self.atorch_args), load_strategy=strategy, ignore_dryrun_on_load_strategy=True, find_unused_parameters=ddp_find_unused_parameters, ) assert ( status ), f"auto_accelerate failed. status: {status}, result: {result}, best_strategy: {best_strategy}" print(f"Best strategy is: {best_strategy}") self.model = result.model self.optimizer = result.optim print(f'optimizer after fsdp: {self.optimizer}') self.loss_func = result.loss_func self.train_dataloader = result.dataloader self.prepare_input = result.prepare_input if self.args.resume_from_checkpoint == 'true': self.load_atorch_optim_state(optim_state_dict) if self.args.weighted_loss_mode.startswith('famo_valid'): self.load_famo_state() print(f"atorch use optimizer: {self.optimizer}") if self.is_rank0(): print(f'GPU mem after fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) def evaluate(self): logger.info(f"Start evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) print(f'valid dataset length is: {len(self.valid_dataset)}') print(f'valid dataloader length is: {len(self.valid_dataloader)}') print(f'per device batch size: {self.args.per_device_valid_batch_size}') progress_bar = tqdm(range(len(self.valid_dataloader)), disable=not is_local_main_process(), smoothing=0) self.model.eval() losses = [] accumulated_task_loss_np = 
np.zeros(len(self.ID2TASK)) accumulated_task_num_np = np.zeros(len(self.ID2TASK)) accumulated_step = 0 for step, batch in enumerate(self.valid_dataloader): # if step >= self.args.valid_iters: if step >= self.args.valid_iters and (self.args.total_model_param >= 1e9 or self.args.train_mode == 'sst'): break with torch.no_grad(): # batch = {k: v.to(self.device) for k, v in batch.items()} # batch = self.prepare_input(batch, self.device) # outputs = self.model(**batch) outputs = self.model( input_ids=batch['input_ids'].to(self.device), attention_mask=batch['attention_mask'].to(self.device), position_ids=batch['position_ids'].to(self.device) ) # loss = outputs["loss"] loss, task_loss, task_num, _, _ = self.loss_func(outputs, batch, self.args.weighted_loss_mode) repeated_loss = loss.repeat( self.args.per_device_valid_batch_size) if repeated_loss.ndim == 0: repeated_loss = repeated_loss.clone()[None] output_tensors = [repeated_loss.clone() for _ in range(atorch.world_size())] torch.distributed.all_gather(output_tensors, repeated_loss) for tensor in output_tensors: if torch.isnan(tensor).any() or torch.isinf(tensor).any(): accumulated_step -= 1 continue losses.append(torch.cat(output_tensors, dim=0).cpu()) task_loss = task_loss.cpu().numpy() task_num = task_num.cpu().numpy() accumulated_task_loss_np += task_loss accumulated_task_num_np += task_num accumulated_step += 1 progress_bar.update(1) losses = torch.cat(losses) losses = losses[: len(self.valid_dataset)] mean_loss = torch.mean(losses).item() accumulated_task_loss = torch.tensor(accumulated_task_loss_np).to(self.device) accumulated_task_num = torch.tensor(accumulated_task_num_np).to(self.device) torch.distributed.all_reduce(accumulated_task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(accumulated_task_num, op=torch.distributed.ReduceOp.SUM) accumulated_task_loss /= torch.distributed.get_world_size() valid_task_loss = accumulated_task_loss / (accumulated_step - 1) logs = {'valid_loss': mean_loss} 
per_task_valid_loss = {self.ID2TASK[i]+'_loss': valid_task_loss[i].item() for i in range(len(self.ID2TASK))} logs.update(per_task_valid_loss) if is_global_main_process(): logger.info('log point') for i in range(len(self.ID2TASK)): if accumulated_task_num[i] != 0: logger.info(f"{self.ID2TASK[i]}_loss: {valid_task_loss[i]}, sample nums: {accumulated_task_num[i]}") self.log(logs, step=self.global_steps, phase='Evaluation') metrics = {'valid_loss': mean_loss, 'valid_task_loss': valid_task_loss} logger.info(f"Finish evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) return metrics def log(self, logs, step, phase='Train'): if not self.summary_writer: return logger.info(json.dumps(logs)) for key, value in logs.items(): self.summary_writer.add_scalar(f'{phase}/{key}', value, step) def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)', use_mtime=False ): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob( f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append( (os.path.getmtime(path), path)) else: regex_match = re.search( f".*{checkpoint_prefix}-({checkpoint_name_pattern})", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append( (int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. 
if self.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.best_model_checkpoint))) # for i in range(best_model_index, len(checkpoints_sorted) - 2): for i in range(best_model_index, len(checkpoints_sorted) - 1): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] print_rank_0(f'checkpoints sorted list: {checkpoints_sorted}') return checkpoints_sorted def _rotate_checkpoints( self, use_mtime=False, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='.*') -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern=checkpoint_name_pattern) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit number_of_checkpoints_to_delete = max( 0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def _clean_atorch_checkpoints(self, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR): # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern='([0-9]+)') # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
for checkpoint in checkpoints_sorted[:-1]: logger.info( f"Deleting older atorch checkpoint [{checkpoint}] due to self.args.save_total_limit") try: os.remove(os.path.join(checkpoint, ATORCH_CHECKPOINT_NAME)) except Exception: continue def _save_peft_model(self, output_dir, state_dict=None): logger.info(f"Start saving peft model to {output_dir}") output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) model = unwrap_model(self.model)
if isinstance(model, PeftModel):
10
2023-11-02 01:37:01+00:00
16k
bytedance/cryostar
projects/star/train_density.py
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\"...
import os import os.path as osp import einops import lightning.pytorch as pl import numpy as np import torch from lightning.pytorch.strategies import DDPStrategy from lightning.pytorch.utilities import rank_zero_only from torch.utils.data import DataLoader from tqdm import tqdm from mmengine import mkdir_or_exist from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig from cryostar.nerf.volume_utils import ImplicitFourierVolume from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d) from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict) from cryostar.utils.losses import calc_kl_loss from cryostar.utils.ml_modules import VAEEncoder, reparameterize from cryostar.utils.mrc_tools import save_mrc from miscs import infer_ctf_params_from_config
12,413
else: f_pred = self.vol(None, R) pred_ctf_params = {k: batch[k] for k in ('defocusU', 'defocusV', 'angleAstigmatism') if k in batch} f_pred = self.ctf(f_pred, batch['idx'], ctf_params=pred_ctf_params, mode="gt", frequency_marcher=None) if self.cfg.loss.loss_fn == "rmsf": pred = fourier_to_primal_2d(f_pred).real delta = pred - proj_out em_loss = delta.reshape(bsz, -1).square().mean() elif self.cfg.loss.loss_fn == "fmsf": f_proj = primal_to_fourier_2d(proj_out) delta = torch.view_as_real(f_proj - f_pred) delta = delta[einops.repeat(self.mask, "ny nx -> b 1 ny nx c", b=delta.shape[0], c=delta.shape[-1])] em_loss = delta.reshape(bsz, -1).square().mean() else: raise NotImplementedError loss = em_loss log_dict = {"em": em_loss} if self.z_dim != 0: log_dict["kld"] = kld_loss loss = loss + kld_loss if self.global_step % 100 == 0: log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " + pretty_dict(log_dict, 5)) return loss def on_validation_start(self) -> None: self.evaluate() def validation_step(self, *args, **kwargs): pass def save_ckpt(self): if self.trainer.is_global_zero: save_dir = self._get_save_dir() torch.save(self.vol.state_dict(), os.path.join(save_dir, "ckpt.pt")) # self.history_saved_dirs.append(save_dir) # keep_last_k = 1 # if len(self.history_saved_dirs) >= keep_last_k: # for to_remove in self.history_saved_dirs[:-keep_last_k]: # p = Path(to_remove) / "ckpt.pt" # if p.exists(): # p.unlink() # log_to_current(f"delete {p} to keep last {keep_last_k} ckpts") def evaluate(self) -> None: pixel_size = self.cfg.data_process.down_apix valid_loader = DataLoader(dataset=self.dataset, batch_size=self.cfg.data_loader.val_batch_per_gpu, shuffle=False, drop_last=False, num_workers=12) if self.trainer.is_global_zero: save_dir = self._get_save_dir() self.save_ckpt() if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is None: zs = [] for batch in tqdm(iter(valid_loader)): proj_in, proj_out = self.process_image(batch) 
f_proj_in = primal_to_fourier_2d(proj_in) if self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)").to(self.device) else: enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2).to(self.device) mu, log_var = self.encoder(enc_input) zs.append(mu.detach().cpu()) # if self.cfg.trainer.devices == 1 and len(zs) > 20: # for _ in range(10): # log_to_current("WARNING!" + "*" * _) # log_to_current( # "since only one device is used, we assume this is a debug mode, and do not go through all validsets" # ) # break zs = torch.cat(zs).cpu().numpy() else: zs = self.given_z.cpu().numpy() np.save(f"{save_dir}/z.npy", zs) kmeans_labels, centers = cluster_kmeans(zs, 10) centers, centers_ind = get_nearest_point(zs, centers) np.savetxt(f"{save_dir}/z_kmeans.txt", centers, fmt='%.5f') np.savetxt(f"{save_dir}/z_kmeans_ind.txt", centers_ind, fmt='%d') centers = torch.from_numpy(centers).to(self.device) for i in range(len(centers)): v = self.vol.make_volume(centers[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_kmeans_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) for pca_dim in range(1, 1 + min(3, self.cfg.model.z_dim)): z_on_pca, z_on_pca_id = sample_along_pca(zs, pca_dim, 10) np.savetxt(f"{save_dir}/z_pca_{pca_dim}.txt", z_on_pca, fmt='%.5f') np.savetxt(f"{save_dir}/z_pca_ind_{pca_dim}.txt", z_on_pca_id, fmt='%d') z_on_pca = torch.from_numpy(z_on_pca).to(self.device) for i in range(len(z_on_pca)): v = self.vol.make_volume(z_on_pca[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_pca_{pca_dim}_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) else: v = self.vol.make_volume(None) save_mrc(v.cpu().numpy(), f"{save_dir}/vol.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) def on_train_start(self) -> None: if self.trainer.is_global_zero: log_to_current(self) def configure_optimizers(self): return torch.optim.AdamW(self.parameters(), 0.0001) def train(): cfg = 
pl_init_exp(exp_prefix=TASK_NAME, backup_list=[ __file__, ], inplace=False) dataset = StarfileDataSet(
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) 
self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input) z = reparameterize(mu, log_var) kld_loss = calc_kl_loss(mu, log_var, 
self.cfg.loss.free_bits) kld_loss = kld_loss / self.mask.sum() f_pred = self.vol(z, R) else: f_pred = self.vol(None, R) pred_ctf_params = {k: batch[k] for k in ('defocusU', 'defocusV', 'angleAstigmatism') if k in batch} f_pred = self.ctf(f_pred, batch['idx'], ctf_params=pred_ctf_params, mode="gt", frequency_marcher=None) if self.cfg.loss.loss_fn == "rmsf": pred = fourier_to_primal_2d(f_pred).real delta = pred - proj_out em_loss = delta.reshape(bsz, -1).square().mean() elif self.cfg.loss.loss_fn == "fmsf": f_proj = primal_to_fourier_2d(proj_out) delta = torch.view_as_real(f_proj - f_pred) delta = delta[einops.repeat(self.mask, "ny nx -> b 1 ny nx c", b=delta.shape[0], c=delta.shape[-1])] em_loss = delta.reshape(bsz, -1).square().mean() else: raise NotImplementedError loss = em_loss log_dict = {"em": em_loss} if self.z_dim != 0: log_dict["kld"] = kld_loss loss = loss + kld_loss if self.global_step % 100 == 0: log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " + pretty_dict(log_dict, 5)) return loss def on_validation_start(self) -> None: self.evaluate() def validation_step(self, *args, **kwargs): pass def save_ckpt(self): if self.trainer.is_global_zero: save_dir = self._get_save_dir() torch.save(self.vol.state_dict(), os.path.join(save_dir, "ckpt.pt")) # self.history_saved_dirs.append(save_dir) # keep_last_k = 1 # if len(self.history_saved_dirs) >= keep_last_k: # for to_remove in self.history_saved_dirs[:-keep_last_k]: # p = Path(to_remove) / "ckpt.pt" # if p.exists(): # p.unlink() # log_to_current(f"delete {p} to keep last {keep_last_k} ckpts") def evaluate(self) -> None: pixel_size = self.cfg.data_process.down_apix valid_loader = DataLoader(dataset=self.dataset, batch_size=self.cfg.data_loader.val_batch_per_gpu, shuffle=False, drop_last=False, num_workers=12) if self.trainer.is_global_zero: save_dir = self._get_save_dir() self.save_ckpt() if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is None: zs = [] for 
batch in tqdm(iter(valid_loader)): proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)").to(self.device) else: enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2).to(self.device) mu, log_var = self.encoder(enc_input) zs.append(mu.detach().cpu()) # if self.cfg.trainer.devices == 1 and len(zs) > 20: # for _ in range(10): # log_to_current("WARNING!" + "*" * _) # log_to_current( # "since only one device is used, we assume this is a debug mode, and do not go through all validsets" # ) # break zs = torch.cat(zs).cpu().numpy() else: zs = self.given_z.cpu().numpy() np.save(f"{save_dir}/z.npy", zs) kmeans_labels, centers = cluster_kmeans(zs, 10) centers, centers_ind = get_nearest_point(zs, centers) np.savetxt(f"{save_dir}/z_kmeans.txt", centers, fmt='%.5f') np.savetxt(f"{save_dir}/z_kmeans_ind.txt", centers_ind, fmt='%d') centers = torch.from_numpy(centers).to(self.device) for i in range(len(centers)): v = self.vol.make_volume(centers[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_kmeans_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) for pca_dim in range(1, 1 + min(3, self.cfg.model.z_dim)): z_on_pca, z_on_pca_id = sample_along_pca(zs, pca_dim, 10) np.savetxt(f"{save_dir}/z_pca_{pca_dim}.txt", z_on_pca, fmt='%.5f') np.savetxt(f"{save_dir}/z_pca_ind_{pca_dim}.txt", z_on_pca_id, fmt='%d') z_on_pca = torch.from_numpy(z_on_pca).to(self.device) for i in range(len(z_on_pca)): v = self.vol.make_volume(z_on_pca[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_pca_{pca_dim}_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) else: v = self.vol.make_volume(None) save_mrc(v.cpu().numpy(), f"{save_dir}/vol.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) def on_train_start(self) -> None: if self.trainer.is_global_zero: log_to_current(self) def configure_optimizers(self): 
return torch.optim.AdamW(self.parameters(), 0.0001) def train(): cfg = pl_init_exp(exp_prefix=TASK_NAME, backup_list=[ __file__, ], inplace=False) dataset = StarfileDataSet(
StarfileDatasetConfig(
1
2023-11-06 07:15:26+00:00
16k
UMass-Foundation-Model/CoVLM
transformers/src/transformers/models/graphormer/configuration_graphormer.py
[ { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for lo...
from ...configuration_utils import PretrainedConfig from ...utils import logging
12,439
# coding=utf-8 # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Graphormer model configuration""" logger = logging.get_logger(__name__) GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { # pcqm4mv1 now deprecated "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json", # See all Graphormer models at https://huggingface.co/models?filter=graphormer }
# coding=utf-8 # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Graphormer model configuration""" logger = logging.get_logger(__name__) GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { # pcqm4mv1 now deprecated "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json", # See all Graphormer models at https://huggingface.co/models?filter=graphormer }
class GraphormerConfig(PretrainedConfig):
0
2023-11-07 04:23:57+00:00
16k
HKU-BAL/ClairS-TO
src/realign_reads.py
[ { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "BASIC_BASES = set(\"ACGTU\")\nWARNING = '\\033[93m'\nERROR = '\\033[91m'\nENDC = '\\033[0m'\ndef log_error(log):\ndef log_warning(log):\ndef is_file_exists(file_name, suffix=\"\"):\ndef is_folder_exists(folder_name, suffix=\"\"):...
import sys import os import shlex import ctypes import re import subprocess import shared.param as param from subprocess import PIPE from argparse import ArgumentParser, SUPPRESS from collections import defaultdict from shared.utils import subprocess_popen, reference_sequence_from, IUPAC_base_to_ACGT_base_dict as BASE2ACGT, log_error from shared.interval_tree import bed_tree_from from shared.intervaltree.intervaltree import IntervalTree
13,875
chunk_start += CHUNK_SIZE chunk_end += CHUNK_SIZE read = Read(read_start=POS, seq=SEQ, cigar=CIGAR, mapping_quality=MAPQ, base_quality=QUAL, strand=STRAND, raw_base_quality=raw_base_quality, read_name=read_name, flag=FLAG, PNEXT=PNEXT, RNEXT=RNEXT, TLEN=TLEN, phasing=HP_TAG) if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): continue aligned_reads[read_name] = read if MAPQ < min_dbg_mapping_quality: continue advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == '=': reference_position += advance query_position += advance elif c == "M" or c == 'X': for _ in range(advance): if QUAL[query_position] >= min_dbg_base_quality: reference_base = reference_sequence[reference_position - reference_start_0_based] # 0 base query_base = SEQ[query_position] if reference_base in 'ACGT' and query_base != reference_base: pileup[reference_position]['X'] += 1 reference_position += 1 query_position += 1 elif c == "I" or c == 'S': pre_base = reference_sequence[reference_position - reference_start_0_based - 1] ins_base_quality = QUAL[query_position: query_position + advance] out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp if not out_of_region and pre_base in 'ACGT' and ( sum([True for bq in ins_base_quality if bq < min_dbg_base_quality]) == 0): # skip the bad seq start = reference_position - advance end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # insertion consumes query query_position += advance elif c == "D": out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp pre_base = reference_sequence[reference_position - reference_start_0_based - 1] # 0-base if not out_of_region and pre_base in 'ACGT': start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 
1 # deletion consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
realign_chunk_size = 5000 min_dbg_mapping_quality = min_dbg_base_quality = 20 region_expansion_in_bp = expand_align_ref_region = 20 min_windows_distance = expand_align_ref_region * 4 max_window_size = max_region_reads_num = 1000 expandReferenceRegion = 100000 realigner_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/realigner',))) dbg_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/debruijn_graph',))) if not os.path.exists(realigner_mod) or not os.path.exists(dbg_mod): # try to find modules in clair3 python_path = subprocess.run('which python', stdout=subprocess.PIPE, shell=True).stdout.decode().rstrip() conda_prefix = os.path.dirname(os.path.dirname(python_path)) clair3_realign_path = os.path.join(conda_prefix, 'bin', 'preprocess', 'realign') clair3_realigner_mod = os.path.join(clair3_realign_path, 'realigner') clair3_dbg_mod = os.path.join(clair3_realign_path, 'debruijn_graph') if os.path.exists(clair3_realigner_mod) and os.path.exists(clair3_dbg_mod): realigner_mod = clair3_realigner_mod dbg_mod = clair3_dbg_mod else: print(log_error("[ERROR] `realigner` or `debruijn_graph` submodule not found in conda environment, pls install clair3-illumina package!")) sys.exit(1) realigner = ctypes.cdll.LoadLibrary(realigner_mod) dbg = ctypes.cdll.LoadLibrary(dbg_mod) class StructPointer(ctypes.Structure): _fields_ = [("position", ctypes.c_int * max_region_reads_num), ("cigar_string", ctypes.c_char_p * max_region_reads_num), ] class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, y in zip(s1, s2): if x == y: match_str += 
"|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = PNEXT self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = 
int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base): return base if base == "N" else BASE2ACGT[base] def region_from(ctg_name, ctg_start=None, ctg_end=None): """ 1-based region string [start, end] """ if ctg_name is None: return "" if (ctg_start is None) != (ctg_end is None): return "" if ctg_start is None and ctg_end is None: return "{}".format(ctg_name) return "{}:{}-{}".format(ctg_name, ctg_start, ctg_end) class TensorStdout(object): def __init__(self, handle): self.stdin = handle def __del__(self): self.stdin.close() def get_halpotype_tag(samtools_view_columns): found_hp_tag = False tag = [c for c in samtools_view_columns if 'HP:i:' in c] if not len(tag) or len(tag[0]) < 6 or not tag[0][5].isdigit(): return None return tag[0][5] def is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): soft_clipped_bases = 0 total_alignment_positions = 0 advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == "S": soft_clipped_bases += advance total_alignment_positions += advance advance = 0 # skip a read less than 55% aligned return 1.0 - float(soft_clipped_bases) / (total_alignment_positions + 1) < 0.55 def samtools_view_generator_from(samtools_view_process, aligned_reads, pileup, ctg_name, reference_sequence, 
reference_start_0_based, header, center_pos=None): CHUNK_SIZE = realign_chunk_size chunk_start, chunk_end = None, None for row_id, row in enumerate(samtools_view_process.stdout): if row[0] == '@': header.append(row) continue columns = row.strip().split() RNAME = columns[2] if RNAME != ctg_name: continue read_name = columns[0] FLAG = int(columns[1]) POS = int(columns[3]) - 1 # switch from 1-base to 0-base to match sequence index MAPQ = int(columns[4]) CIGAR = columns[5] SEQ = columns[9].upper() # uppercase for SEQ (regexp is \*|[A-Za-z=.]+) RNEXT = columns[6] PNEXT = columns[7] TLEN = columns[8] reference_position = POS query_position = 0 raw_base_quality = columns[10] QUAL = [phredscore2raw_score(item) for item in raw_base_quality] STRAND = (16 == (FLAG & 16)) HP_TAG = get_halpotype_tag(columns[11:]) read_name += "_" + str(int(STRAND)) # distinguish two strand if chunk_start is None: chunk_start = POS chunk_end = chunk_start + CHUNK_SIZE if POS >= chunk_end + region_expansion_in_bp: yield chunk_start, chunk_end chunk_start += CHUNK_SIZE chunk_end += CHUNK_SIZE read = Read(read_start=POS, seq=SEQ, cigar=CIGAR, mapping_quality=MAPQ, base_quality=QUAL, strand=STRAND, raw_base_quality=raw_base_quality, read_name=read_name, flag=FLAG, PNEXT=PNEXT, RNEXT=RNEXT, TLEN=TLEN, phasing=HP_TAG) if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): continue aligned_reads[read_name] = read if MAPQ < min_dbg_mapping_quality: continue advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == '=': reference_position += advance query_position += advance elif c == "M" or c == 'X': for _ in range(advance): if QUAL[query_position] >= min_dbg_base_quality: reference_base = reference_sequence[reference_position - reference_start_0_based] # 0 base query_base = SEQ[query_position] if reference_base in 'ACGT' and query_base != reference_base: pileup[reference_position]['X'] += 1 reference_position += 1 query_position += 1 elif c == 
"I" or c == 'S': pre_base = reference_sequence[reference_position - reference_start_0_based - 1] ins_base_quality = QUAL[query_position: query_position + advance] out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp if not out_of_region and pre_base in 'ACGT' and ( sum([True for bq in ins_base_quality if bq < min_dbg_base_quality]) == 0): # skip the bad seq start = reference_position - advance end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # insertion consumes query query_position += advance elif c == "D": out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp pre_base = reference_sequence[reference_position - reference_start_0_based - 1] # 0-base if not out_of_region and pre_base in 'ACGT': start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + 
max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))
tree = bed_tree_from(bed_file_path=bed_file_path)
1
2023-11-07 04:39:16+00:00
16k
the-siesta-group/edfio
tests/test_faq.py
[ { "identifier": "Edf", "path": "edfio/edf.py", "snippet": "class Edf:\n \"\"\"Python representation of an EDF file.\n\n EDF header fields are exposed as properties with appropriate data types (i.e.,\n string, numeric, date, or time objects). Fields that might break the file on\n modification...
import datetime import numpy as np import pytest from pathlib import Path from edfio import Edf, EdfSignal, read_edf from edfio._header_field import ( RawHeaderFieldDate, RawHeaderFieldFloat, RawHeaderFieldTime, )
12,268
""" Tests to verify the adherence to the EDF FAQ: https://www.edfplus.info/specs/edffaq.html """ def test_q1_create_edf_signal_with_non_printable_character_in_label_fails(): with pytest.raises(ValueError, match="contains non-printable characters"):
""" Tests to verify the adherence to the EDF FAQ: https://www.edfplus.info/specs/edffaq.html """ def test_q1_create_edf_signal_with_non_printable_character_in_label_fails(): with pytest.raises(ValueError, match="contains non-printable characters"):
EdfSignal(np.arange(10.1), 1, label="\t")
1
2023-11-09 09:53:27+00:00
16k
sb-ai-lab/HypEx
hypex/matcher.py
[ { "identifier": "FaissMatcher", "path": "hypex/algorithms/faiss_matcher.py", "snippet": "class FaissMatcher:\n \"\"\"A class used to match instances using Faiss library.\"\"\"\n\n def __init__(\n self,\n df: pd.DataFrame,\n outcomes: str,\n treatment: st...
import logging import pickle import numpy as np import pandas as pd from typing import Union from tqdm.auto import tqdm from .algorithms.faiss_matcher import FaissMatcher from .algorithms.no_replacement_matching import MatcherNoReplacement from .selectors.feature_selector import FeatureSelector from .selectors.spearman_filter import SpearmanFilter from .selectors.outliers_filter import OutliersFilter from .selectors.base_filtration import const_filtration, nan_filtration from .utils.validators import random_feature from .utils.validators import random_treatment from .utils.validators import subset_refuter from .utils.validators import test_significance
12,854
if self.info_col is not None: df = df.drop(columns=self.info_col) features = feat_select.perform_selection(df=df) if self.group_col is None: self.features_importance = features else: self.features_importance = features.append( {"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True ) return self.features_importance.sort_values("Importance", ascending=False) def _create_faiss_matcher(self, df=None, validation=None): """Creates a FaissMatcher object. Args: df: The dataframe to use. If None, uses self.input_data. validation: Whether to use the matcher for validation. If None, determines based on whether """ if df is None: df = self.input_data self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, weights=self.weights, features=self.features_importance, group_col=self.group_col, validation=validation, n_neighbors=self.n_neighbors, pbar=False if validation else self.pbar, ) def _perform_validation(self): """Performs validation using the FaissMatcher.""" if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If silent, logs will be only info """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. 
Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be droped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature":
"""Base Matcher class.""" REPORT_FEAT_SELECT_DIR = "report_feature_selector" REPORT_PROP_MATCHER_DIR = "report_matcher" NAME_REPORT = "lama_interactive_report.html" N_THREADS = 1 N_FOLDS = 4 RANDOM_STATE = 123 TEST_SIZE = 0.2 TIMEOUT = 600 VERBOSE = 2 USE_ALGOS = ["lgb"] PROP_SCORES_COLUMN = "prop_scores" GENERATE_REPORT = True SAME_TARGET_THRESHOLD = 0.7 OUT_INTER_COEFF = 1.5 OUT_MODE_PERCENT = True OUT_MIN_PERCENT = 0.02 OUT_MAX_PERCENT = 0.98 logger = logging.getLogger("hypex") console_out = logging.StreamHandler() logging.basicConfig( handlers=(console_out,), format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s", datefmt="%d.%m.%Y %H:%M:%S", level=logging.INFO, ) class Matcher: """Class for compile full pipeline of Matching in Causal Inference task. Matcher steps: - Read, analyze data - Feature selection via LightAutoML - Converting a dataset with features to another space via Cholesky decomposition In the new space, the distance L2 becomes equivalent to the Mahalanobis distance. This allows us to use faiss to search for nearest objects, which can search only by L2 metric, but without violating the methodology of matching, for which it is important to count by the Mahalanobis distance - Finding the nearest neighbors for each unit (with duplicates) using faiss. For each of the control group, neighbors from the target group are matched and vice versa. 
- Calculation bias - Creating matched df (Wide df with pairs) - Calculation metrics: ATE, ATT, ATC, p-value, and сonfidence intervals - Calculation quality: PS-test, KS test, SMD test - Returns metrics as dataframe, quality results as dict of df's and df_matched - After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result` Example: Common usecase - base pipeline for matching >>> # Base info >>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1 >>> target = "target" # Column name with target >>> >>> # Optional >>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative. >>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature) >>> >>> # Matching >>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col) >>> features = model.lama_feature_select() # Feature selection via lama >>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching >>> >>> model.validate_result() """ def __init__( self, input_data: pd.DataFrame, treatment: str, outcome: Union[str, list] = None, outcome_type: str = "numeric", group_col: str = None, info_col: list = None, weights: dict = None, base_filtration: bool = False, generate_report: bool = GENERATE_REPORT, report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR, timeout: int = TIMEOUT, n_threads: int = N_THREADS, n_folds: int = N_FOLDS, verbose: bool = VERBOSE, use_algos: list = None, same_target_threshold: float = SAME_TARGET_THRESHOLD, interquartile_coeff: float = OUT_INTER_COEFF, drop_outliers_by_percentile: bool = OUT_MODE_PERCENT, min_percentile: float = OUT_MIN_PERCENT, max_percentile: float = OUT_MAX_PERCENT, n_neighbors: int = 1, silent: bool = True, pbar: bool = True, ): """Initialize the Matcher object. 
Args: input_data: Input dataframe outcome: Target column treatment: Column determine control and test groups outcome_type: Values type of target column. Defaults to "numeric" group_col: Column for grouping. Defaults to None. info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. 
silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration: filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = np.concatenate( ( self.dropped_features, [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop], ) ) self.input_data = self.input_data[filtered_features + columns_to_drop] self._log("Categorical features turned into dummy") def _apply_filter(self, filter_class, *filter_args): """Applies a filter 
to the input data. Args: filter_class: The class of the filter to apply. *filter_args: Arguments to pass to the filter class. """ filter_instance = filter_class(*filter_args) self.input_data = filter_instance.perform_filter(self.input_data) def _spearman_filter(self): """Applies a filter by dropping columns correlated with the outcome column. This method uses the Spearman filter to eliminate features from the dataset that are highly correlated with the outcome columns, based on a pre-set threshold """ self._log("Applying filter by spearman test - drop columns correlated with outcome") self._apply_filter(SpearmanFilter, self.outcomes[0], self.treatment, self.same_target_threshold) def outliers_filter(self): """Removes outlier values from the dataset. This method employs an OutliersFilter. If `drop_outliers_by_percentile` is True, it retains only the values between the min and max percentiles If `drop_outliers_by_percentile` is False, it retains only the values between 2nd and 98th percentiles """ self._log( f"Applying filter of outliers\n" f"interquartile_coeff={self.interquartile_coeff}\n" f"mode_percentile={self.mode_percentile}\n" f"min_percentile={self.min_percentile}\n" f"max_percentile={self.max_percentile}" ) self._apply_filter( OutliersFilter, self.interquartile_coeff, self.mode_percentile, self.min_percentile, self.max_percentile ) def match_no_rep(self, threshold: float = 0.1, approximate_match: bool = False) -> pd.DataFrame: """Matching groups with no replacement. It's done by optimizing the linear sum of distances between pairs of treatment and control samples. Args: threshold: caliper for minimum deviation between test and control groups. in case weights is not None. approximate_match: use or not approximate matching Returns: Matched dataframe with no replacements. 
""" a = self.input_data[self.treatment] X = self.input_data.drop(columns=self.treatment) if self.info_col is not None: X = X.drop(columns=self.info_col) index_matched = MatcherNoReplacement(X, a, self.weights, approximate_match).match() filtred_matches = index_matched.loc[1].iloc[self.input_data[a == 1].index].matches[index_matched.loc[1].iloc[self.input_data[a == 1].index].matches.apply(lambda x: x != [])] if self.weights is not None: weighted_features = [f for f in self.weights.keys()] index_dict = dict() for w in weighted_features: source = self.input_data.loc[np.concatenate(filtred_matches.values)][w].values target = self.input_data.loc[filtred_matches.index.to_list()][w].values index = abs(source - target) <= abs(source) * threshold index_dict.update({w: index}) index_filtered = sum(index_dict.values()) == len(self.weights) matched_data = pd.concat( [self.input_data.loc[filtred_matches.index.to_list()].iloc[index_filtered], self.input_data.loc[np.concatenate(filtred_matches.values)].iloc[index_filtered]] ) else: matched_data = pd.concat([self.input_data.loc[filtred_matches.index.to_list()], self.input_data.loc[np.concatenate(filtred_matches.values)]]) return matched_data def lama_feature_select(self) -> pd.DataFrame: """Calculates the importance of each feature. 
This method use LamaFeatureSelector to rank the importance of each feature in the dataset The features are then sorted by their importance with the most important feature first Returns: The feature importances, sorted in descending order """ self._log("Counting feature importance") feat_select = FeatureSelector( outcome=self.outcomes[0], outcome_type=self.outcome_type, treatment=self.treatment, timeout=self.timeout, n_threads=self.n_threads, n_folds=self.n_folds, verbose=self.verbose, generate_report=self.generate_report, report_dir=self.report_feat_select_dir, use_algos=self.use_algos, ) df = self.input_data if self.group_col is None else self.input_data.drop(columns=self.group_col) if self.info_col is not None: df = df.drop(columns=self.info_col) features = feat_select.perform_selection(df=df) if self.group_col is None: self.features_importance = features else: self.features_importance = features.append( {"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True ) return self.features_importance.sort_values("Importance", ascending=False) def _create_faiss_matcher(self, df=None, validation=None): """Creates a FaissMatcher object. Args: df: The dataframe to use. If None, uses self.input_data. validation: Whether to use the matcher for validation. If None, determines based on whether """ if df is None: df = self.input_data self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, weights=self.weights, features=self.features_importance, group_col=self.group_col, validation=validation, n_neighbors=self.n_neighbors, pbar=False if validation else self.pbar, ) def _perform_validation(self): """Performs validation using the FaissMatcher.""" if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) def _log(self, message, silent=None): """Logs a message at the appropriate level. 
Args: message: The message to log. silent: If silent, logs will be only info """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be droped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature":
self.input_data, self.validate = random_feature(self.input_data)
7
2023-11-01 08:58:57+00:00
16k
tianhaowuhz/human-assisting-dex-grasp
Runners/EvalGFPPO.py
[ { "identifier": "GFPPO", "path": "Algorithms/ppo/gf_ppo_update.py", "snippet": "class GFPPO:\n def __init__(self,\n vec_env,\n cfg_train,\n device='cpu',\n sampler='sequential',\n log_dir='run',\n is_testi...
import isaacgym import condexenvs import torch import os import sys from Algorithms.ppo import GFPPO from utils.config import load_cfg, get_args, set_np_formatting
10,971
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "sr" sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True cfg_train["learn"]["test"] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = args.run_device_id, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "sr" sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True cfg_train["learn"]["test"] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = args.run_device_id, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
runner = GFPPO(vec_env=envs,
0
2023-11-09 06:08:40+00:00
16k
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in th...
import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Sequence from rhofold.model.primitives import Linear, LayerNorm from rhofold.utils.rigid_utils import Rigid from rhofold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) from einops import rearrange from rhofold.utils.alphabet import RNAAlphabet from rhofold.utils.converter import RNAConverter
11,019
self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, refinenet, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = 1e-8 self.inf = 1e5 self.default_frames = None self.group_idx = None self.atom_mask = None self.lit_positions = None self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.layer_norm_ipa = 
LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) self.refinenet = RefineNet( **refinenet ) if refinenet.enable else None
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. 
Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps self.fn = torch.nn.LayerNorm(1) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) phase = self.fn(norm) return phase * normed_coors # classes class EGNN(torch.nn.Module): def __init__( self, dim, m_dim=32, ): super().__init__() ''' # Most of the code in this file is based on egnn-pytorch by lucidrains. 
''' edge_input_dim = (dim * 2) + 1 self.edge_mlp = torch.nn.Sequential( torch.nn.Linear(edge_input_dim, edge_input_dim * 2), SiLU(), torch.nn.Linear(edge_input_dim * 2, m_dim), SiLU() ) self.coors_norm = CoorsNorm() self.node_mlp = torch.nn.Sequential( torch.nn.Linear(dim + m_dim, dim * 2), SiLU(), torch.nn.Linear(dim * 2, dim), ) self.coors_mlp = torch.nn.Sequential( torch.nn.Linear(m_dim, m_dim * 4), SiLU(), torch.nn.Linear(m_dim * 4, 1) ) def forward(self, feats, coors): rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d') rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True) feats_j = rearrange(feats, 'b j d -> b () j d') feats_i = rearrange(feats, 'b i d -> b i () d') feats_i, feats_j = torch.broadcast_tensors(feats_i, feats_j) edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1) m_ij = self.edge_mlp(edge_input) coor_weights = self.coors_mlp(m_ij) coor_weights = rearrange(coor_weights, 'b i j () -> b i j') rel_coors = self.coors_norm(rel_coors) scale_factor = 1 / 50.0 coors_out = torch.einsum('b i j, b i j c -> b i c', coor_weights * scale_factor, rel_coors) + coors m_i = m_ij.sum(dim=-2) node_mlp_input = torch.cat((feats, m_i), dim=-1) node_out = self.node_mlp(node_mlp_input) + feats return node_out, coors_out class ResEGNN(torch.nn.Module): def __init__(self, corrections=4, dims_in=41, **kwargs): super().__init__() self.layers = torch.nn.ModuleList([EGNN(dim=dims_in, **kwargs) for _ in range(corrections)]) def forward(self, amino, geom, is_fea = False, keep_last_cords = None): output = [] for layer in self.layers: geom_init = geom amino, geom = layer(amino, geom) if keep_last_cords is not None: geom[:, -keep_last_cords:] = geom_init[:, -keep_last_cords:] output.append([amino, geom]) return output if is_fea else geom class PosEmbedding(nn.Embedding): """ """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 
else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: torch.Tensor): """Input is expected to be of size [bsz x seqlen].""" mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden) self.linear_2 = Linear(self.c_hidden, self.c_hidden) self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] 
predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, 
_offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): z[0] = z[0].cpu() # [*, H, N_res, N_res] a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = 
sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6) def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c) self.linear_2 = Linear(self.c, self.c) self.linear_3 = Linear(self.c, self.c) self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, refinenet, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 
23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = 1e-8 self.inf = 1e5 self.default_frames = None self.group_idx = None self.atom_mask = None self.lit_positions = None self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) self.refinenet = RefineNet( **refinenet ) if refinenet.enable else None
self.converter = RNAConverter(
7
2023-11-01 10:29:08+00:00
16k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
13,717
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = []
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = []
amy_system = AmySystem()
18
2023-11-06 09:52:13+00:00
16k
Codra-Ingenierie-Informatique/DataLab
cdl/core/gui/objectview.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\"...
import os from collections.abc import Iterator from typing import TYPE_CHECKING from guidata.configtools import get_icon from qtpy import QtCore as QC from qtpy import QtGui as QG from qtpy import QtWidgets as QW from cdl.config import _ from cdl.core.gui.objectmodel import ObjectGroup from cdl.core.model.image import ImageObj from cdl.core.model.signal import SignalObj from cdl.utils.qthelpers import block_signals from cdl.core.gui.objectmodel import ObjectModel from cdl.core.gui.panel.base import BaseDataPanel
11,256
.. autosummary:: :toctree: SimpleObjectTree GetObjectDialog ObjectView .. autoclass:: SimpleObjectTree :members: .. autoclass:: GetObjectDialog :members: .. autoclass:: ObjectView :members: .. note:: This module provides tree widgets to display signals, images and groups. It is important to note that, by design, the user can only select either individual signals/images or groups, but not both at the same time. This is an important design choice, as it allows to simplify the user experience, and to avoid potential confusion between the two types of selection. """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SimpleObjectTree(QW.QTreeWidget): """Base object handling panel list widget, object (sig/ima) lists""" SIG_ITEM_DOUBLECLICKED = QC.Signal(str) SIG_CONTEXT_MENU = QC.Signal(QC.QPoint) def __init__(self, parent: QW.QWidget, objmodel: ObjectModel) -> None: self.objmodel: ObjectModel = objmodel super().__init__(parent) self.setHeaderHidden(True) self.setColumnCount(1) self.setAlternatingRowColors(True) self.itemDoubleClicked.connect(self.item_double_clicked) def __str__(self) -> str: """Return string representation""" textlist = [] for tl_index in range(self.topLevelItemCount()): tl_item = self.topLevelItem(tl_index) textlist.append(tl_item.text(0)) for index in range(tl_item.childCount()): textlist.append(" " + tl_item.child(index).text(0)) return os.linesep.join(textlist) def initialize_from(self, sobjlist: SimpleObjectTree) -> None: """Init from another SimpleObjectList, without making copies of objects""" self.objmodel = sobjlist.objmodel self.populate_tree() self.set_current_item_id(sobjlist.get_current_item_id()) def iter_items( self, item: QW.QTreeWidgetItem | None = None ) -> Iterator[QW.QTreeWidgetItem]: """Recursively iterate over all items""" if item is None: for index in range(self.topLevelItemCount()): yield from 
self.iter_items(self.topLevelItem(index)) else: yield item for index in range(item.childCount()): yield from self.iter_items(item.child(index)) def get_item_from_id(self, item_id) -> QW.QTreeWidgetItem: """Return QTreeWidgetItem from id (stored in item's data)""" for item in self.iter_items(): if item.data(0, QC.Qt.UserRole) == item_id: return item return None def get_current_item_id(self, object_only: bool = False) -> str | None: """Return current item id""" item = self.currentItem() if item is not None and (not object_only or item.parent() is not None): return item.data(0, QC.Qt.UserRole) return None def set_current_item_id(self, uuid: str, extend: bool = False) -> None: """Set current item by id""" item = self.get_item_from_id(uuid) if extend: self.setCurrentItem(item, 0, QC.QItemSelectionModel.Select) else: self.setCurrentItem(item) def get_current_group_id(self) -> str: """Return current group ID""" selected_item = self.currentItem() if selected_item is None: return None if selected_item.parent() is None: return selected_item.data(0, QC.Qt.UserRole) return selected_item.parent().data(0, QC.Qt.UserRole) @staticmethod def __update_item( item: QW.QTreeWidgetItem, obj: SignalObj | ImageObj | ObjectGroup ) -> None: """Update item""" item.setText(0, f"{obj.short_id}: {obj.title}") if isinstance(obj, (SignalObj, ImageObj)): item.setToolTip(0, obj.metadata_to_html()) item.setData(0, QC.Qt.UserRole, obj.uuid) def populate_tree(self) -> None: """Populate tree with objects""" uuid = self.get_current_item_id()
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Object (signal/image) view widgets ---------------------------------- This module provides widgets to display object (signal/image) trees. .. autosummary:: :toctree: SimpleObjectTree GetObjectDialog ObjectView .. autoclass:: SimpleObjectTree :members: .. autoclass:: GetObjectDialog :members: .. autoclass:: ObjectView :members: .. note:: This module provides tree widgets to display signals, images and groups. It is important to note that, by design, the user can only select either individual signals/images or groups, but not both at the same time. This is an important design choice, as it allows to simplify the user experience, and to avoid potential confusion between the two types of selection. """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SimpleObjectTree(QW.QTreeWidget): """Base object handling panel list widget, object (sig/ima) lists""" SIG_ITEM_DOUBLECLICKED = QC.Signal(str) SIG_CONTEXT_MENU = QC.Signal(QC.QPoint) def __init__(self, parent: QW.QWidget, objmodel: ObjectModel) -> None: self.objmodel: ObjectModel = objmodel super().__init__(parent) self.setHeaderHidden(True) self.setColumnCount(1) self.setAlternatingRowColors(True) self.itemDoubleClicked.connect(self.item_double_clicked) def __str__(self) -> str: """Return string representation""" textlist = [] for tl_index in range(self.topLevelItemCount()): tl_item = self.topLevelItem(tl_index) textlist.append(tl_item.text(0)) for index in range(tl_item.childCount()): textlist.append(" " + tl_item.child(index).text(0)) return os.linesep.join(textlist) def initialize_from(self, sobjlist: SimpleObjectTree) -> None: """Init from another SimpleObjectList, without making copies of objects""" self.objmodel = sobjlist.objmodel self.populate_tree() 
self.set_current_item_id(sobjlist.get_current_item_id()) def iter_items( self, item: QW.QTreeWidgetItem | None = None ) -> Iterator[QW.QTreeWidgetItem]: """Recursively iterate over all items""" if item is None: for index in range(self.topLevelItemCount()): yield from self.iter_items(self.topLevelItem(index)) else: yield item for index in range(item.childCount()): yield from self.iter_items(item.child(index)) def get_item_from_id(self, item_id) -> QW.QTreeWidgetItem: """Return QTreeWidgetItem from id (stored in item's data)""" for item in self.iter_items(): if item.data(0, QC.Qt.UserRole) == item_id: return item return None def get_current_item_id(self, object_only: bool = False) -> str | None: """Return current item id""" item = self.currentItem() if item is not None and (not object_only or item.parent() is not None): return item.data(0, QC.Qt.UserRole) return None def set_current_item_id(self, uuid: str, extend: bool = False) -> None: """Set current item by id""" item = self.get_item_from_id(uuid) if extend: self.setCurrentItem(item, 0, QC.QItemSelectionModel.Select) else: self.setCurrentItem(item) def get_current_group_id(self) -> str: """Return current group ID""" selected_item = self.currentItem() if selected_item is None: return None if selected_item.parent() is None: return selected_item.data(0, QC.Qt.UserRole) return selected_item.parent().data(0, QC.Qt.UserRole) @staticmethod def __update_item( item: QW.QTreeWidgetItem, obj: SignalObj | ImageObj | ObjectGroup ) -> None: """Update item""" item.setText(0, f"{obj.short_id}: {obj.title}") if isinstance(obj, (SignalObj, ImageObj)): item.setToolTip(0, obj.metadata_to_html()) item.setData(0, QC.Qt.UserRole, obj.uuid) def populate_tree(self) -> None: """Populate tree with objects""" uuid = self.get_current_item_id()
with block_signals(widget=self, enable=True):
4
2023-11-09 16:56:03+00:00
16k
lalalamdbf/PLSE_IDRR
src/prompt-tuning/prompt/pipeline_base.py
[ { "identifier": "InputExample", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be pas...
from pickle import FALSE from torch.utils.data.sampler import RandomSampler from transformers.configuration_utils import PretrainedConfig from transformers.generation_utils import GenerationMixin from torch.utils.data import Dataset from typing import * from .data_utils import InputExample, InputFeatures from torch.utils.data._utils.collate import default_collate from tqdm.std import tqdm from transformers.tokenization_utils import PreTrainedTokenizer from transformers.utils.dummy_pt_objects import PreTrainedModel from .utils import TokenizerWrapper from .prompt_base import Template, Verbalizer from collections import defaultdict from collections import namedtuple from torch.utils.data import DataLoader import torch import torch.nn as nn import inspect import numpy as np
11,450
r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. 
Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None, verbalizer: Optional[Verbalizer] = None, max_seq_length: Optional[str] = 512, batch_size: Optional[int] = 1, shuffle: Optional[bool] = False, teacher_forcing: Optional[bool] = False, decoder_max_length: Optional[int] = -1, predict_eos_token: Optional[bool] = False, truncate_method: Optional[str] = "tail", drop_last: Optional[bool] = False, **kwargs, ): assert hasattr(dataset, "__iter__"), f"The dataset must have __iter__ method. dataset is {dataset}" assert hasattr(dataset, "__len__"), f"The dataset must have __len__ method. dataset is {dataset}" self.raw_dataset = dataset self.wrapped_dataset = [] self.tensor_dataset = [] self.template = template self.verbalizer = verbalizer self.batch_size = batch_size self.shuffle = shuffle self.teacher_forcing = teacher_forcing if tokenizer_wrapper is None: if tokenizer_wrapper_class is None: raise RuntimeError("Either wrapped_tokenizer or tokenizer_wrapper_class should be specified.") if tokenizer is None: raise RuntimeError("No tokenizer specified to instantiate tokenizer_wrapper.") tokenizer_wrapper_init_keys = signature(tokenizer_wrapper_class.__init__).args prepare_kwargs = { "max_seq_length" : max_seq_length, "truncate_method" : truncate_method, "decoder_max_length" : decoder_max_length, "predict_eos_token" : predict_eos_token, "tokenizer" : tokenizer, **kwargs, } to_pass_kwargs = {key: prepare_kwargs[key] for key in prepare_kwargs if key in tokenizer_wrapper_init_keys} self.tokenizer_wrapper = tokenizer_wrapper_class(**to_pass_kwargs) else: self.tokenizer_wrapper = tokenizer_wrapper # check the satisfiability of 
each component assert hasattr(self.template, 'wrap_one_example'), "Your prompt has no function variable \ named wrap_one_example" # process self.wrap() self.tokenize() if self.shuffle: sampler = RandomSampler(self.tensor_dataset) else: sampler = None self.dataloader = DataLoader( self.tensor_dataset, batch_size = self.batch_size, sampler= sampler,
def signature(f): r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. 
Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None, verbalizer: Optional[Verbalizer] = None, max_seq_length: Optional[str] = 512, batch_size: Optional[int] = 1, shuffle: Optional[bool] = False, teacher_forcing: Optional[bool] = False, decoder_max_length: Optional[int] = -1, predict_eos_token: Optional[bool] = False, truncate_method: Optional[str] = "tail", drop_last: Optional[bool] = False, **kwargs, ): assert hasattr(dataset, "__iter__"), f"The dataset must have __iter__ method. dataset is {dataset}" assert hasattr(dataset, "__len__"), f"The dataset must have __len__ method. dataset is {dataset}" self.raw_dataset = dataset self.wrapped_dataset = [] self.tensor_dataset = [] self.template = template self.verbalizer = verbalizer self.batch_size = batch_size self.shuffle = shuffle self.teacher_forcing = teacher_forcing if tokenizer_wrapper is None: if tokenizer_wrapper_class is None: raise RuntimeError("Either wrapped_tokenizer or tokenizer_wrapper_class should be specified.") if tokenizer is None: raise RuntimeError("No tokenizer specified to instantiate tokenizer_wrapper.") tokenizer_wrapper_init_keys = signature(tokenizer_wrapper_class.__init__).args prepare_kwargs = { "max_seq_length" : max_seq_length, "truncate_method" : truncate_method, "decoder_max_length" : decoder_max_length, "predict_eos_token" : predict_eos_token, "tokenizer" : tokenizer, **kwargs, } to_pass_kwargs = {key: prepare_kwargs[key] for key in prepare_kwargs if key in tokenizer_wrapper_init_keys} self.tokenizer_wrapper = tokenizer_wrapper_class(**to_pass_kwargs) else: self.tokenizer_wrapper = tokenizer_wrapper # check the satisfiability of 
each component assert hasattr(self.template, 'wrap_one_example'), "Your prompt has no function variable \ named wrap_one_example" # process self.wrap() self.tokenize() if self.shuffle: sampler = RandomSampler(self.tensor_dataset) else: sampler = None self.dataloader = DataLoader( self.tensor_dataset, batch_size = self.batch_size, sampler= sampler,
collate_fn = InputFeatures.collate_fct,
1
2023-11-01 08:52:36+00:00
16k
choderalab/chiron
Examples/LJ_mcmove.py
[ { "identifier": "LJPotential", "path": "chiron/potential.py", "snippet": "class LJPotential(NeuralNetworkPotential):\n def __init__(\n self,\n topology: Topology,\n sigma: unit.Quantity = 3.350 * unit.angstroms,\n epsilon: unit.Quantity = 1.0 * unit.kilocalories_per_mole,\...
from openmmtools.testsystems import LennardJonesFluid from chiron.potential import LJPotential from openmm import unit from chiron.states import SamplerState, ThermodynamicState from chiron.neighbors import NeighborListNsqrd, OrthogonalPeriodicSpace from chiron.neighbors import PairList from chiron.reporters import SimulationReporter from chiron.mcmc import MetropolisDisplacementMove import os
13,250
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state
sampler_state = SamplerState(
1
2023-11-07 18:17:43+00:00
16k
WolfgangFahl/dcm
dcm/dcm_webserver.py
[ { "identifier": "Assessment", "path": "dcm/dcm_assessment.py", "snippet": "class Assessment:\n \"\"\"\n Assessment for CompetenceTree\n \"\"\"\n\n def __init__(\n self,\n webserver: NiceGuiWebserver,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n debug:...
import os from typing import Optional from urllib.parse import urlparse from fastapi import HTTPException from fastapi.responses import HTMLResponse from ngwidgets.file_selector import FileSelector from ngwidgets.input_webserver import InputWebserver from ngwidgets.webserver import WebserverConfig from nicegui import Client, app, ui from pydantic import BaseModel from dcm.dcm_assessment import Assessment from dcm.dcm_chart import DcmChart from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap, Learner from dcm.svg import SVG, SVGConfig from dcm.version import Version
11,980
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str
config: Optional[SVGConfig] = None
6
2023-11-06 09:24:24+00:00
16k
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11,351
} curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. 
crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." 
if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # 
Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score 
data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries
keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
11
2023-11-03 17:05:40+00:00
16k
microsoft/PLEX
PLEX/util/misc.py
[ { "identifier": "setup_logging", "path": "PLEX/util/log.py", "snippet": "def setup_logging(args):\n log_dir = Path(args['log_dir']).expanduser()\n if not log_dir.is_dir():\n print(f'Creating log dir {log_dir}')\n log_dir.mkdir(parents=True)\n\n now_str = datetime.now().strftime('%...
import numpy as np import torch import random import wandb import pickle import PLEX.util.globals as globals from collections import defaultdict from PLEX.util.log import setup_logging from PLEX.util.timer import Timer from PLEX.envs.environments import * from PLEX.models.trajectory_models.plex import PLEX from PLEX.models.trajectory_models.mlp_bc import MLPBCModel from PLEX.models.trajectory_models.decision_transformer import DecisionTransformer from PLEX.training.trainer import Trainer
11,877
env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'metaworld': env = MetaWorldEnv(example_task, use_normalized_reward=False, full_state_mode=globals.full_state_mode, camera_name=camera_names[0]) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'd4rl': env = d4rlEnv(example_task, full_state_mode=globals.full_state_mode) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'bridge' or example_task.dataset_type == 'bridge-v2': obs_dims = (3, image_size, image_size) proprio_dim = 7 action_dim = 7 return obs_dims, proprio_dim, action_dim else: raise ValueError('Unknown dataset type: {}'.format(example_task.dataset_type)) def setup_model(cmdline_args, example_task, log, device, camera_names, modalities_to_mask, data_dir, bc_mode): obs_dims, proprio_dim, action_dim = get_robot_dims(example_task, camera_names, cmdline_args['image_size']) pretrained_state_dict = {} # Load pretrained weights, if applicable load_path = cmdline_args['load_path'] if load_path is not None: load_path = load_path.replace('--TARGET_ROBOT--', cmdline_args['robot']) log(f'Loading pretrained weights from {load_path}') pretrained_state_dict = torch.load(load_path) std_bounds = (cmdline_args['std_min'], cmdline_args['std_max']) tune_style_kwargs = {} tune_style_kwargs['image_encoder_tune_style'] = cmdline_args['image_encoder_tune_style'] if cmdline_args['model'] == 'PLEX': assert cmdline_args['obs_pred.K'] is not None assert cmdline_args['inv_d_pred.K'] is not None assert cmdline_args['obs_pred.K'] >= cmdline_args['inv_d_pred.K'] assert cmdline_args['obs_pred.K'] % cmdline_args['inv_d_pred.K'] == 0 obs_pred_gpt2_kwargs = dict( n_layer=cmdline_args['obs_pred.n_layer'], n_head=cmdline_args['obs_pred.n_head'], K=cmdline_args['obs_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], 
attn_pdrop=cmdline_args['dropout'] ) inv_d_pred_gpt2_kwargs = dict( n_layer=cmdline_args['inv_d_pred.n_layer'], n_head=cmdline_args['inv_d_pred.n_head'], K=cmdline_args['inv_d_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) model = PLEX( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], # The history length for this model is always the observation prediction model's history length: history_len=cmdline_args['obs_pred.K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, relative_position_encodings=cmdline_args['relative_position_encodings'], future_step=cmdline_args['future_step'], std_bounds=std_bounds, obs_pred_gpt2_kwargs=obs_pred_gpt2_kwargs, inv_d_pred_gpt2_kwargs=inv_d_pred_gpt2_kwargs, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['obs_pred_transformer_tune_style'] = cmdline_args['obs_pred.transformer_tune_style'] tune_style_kwargs['inv_d_pred_transformer_tune_style'] = cmdline_args['inv_d_pred.transformer_tune_style'] elif cmdline_args['model'] == 'DT': # Configure the model gpt2_kwargs = dict( n_layer=cmdline_args['n_layer'], n_head=cmdline_args['n_head'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'], relative_position_encodings=cmdline_args['relative_position_encodings'] ) model = DecisionTransformer( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], history_len=cmdline_args['K'], 
image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, gpt2_kwargs=gpt2_kwargs, std_bounds=std_bounds, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['transformer_tune_style'] = cmdline_args['transformer_tune_style'] elif cmdline_args['model'] == 'MLP':
class TaskDescriptor: def __init__(self, task_descr_string): self.dataset_location = task_descr_string = task_descr_string.rstrip('/').lstrip('/ ') parts = task_descr_string.split('/') self.frame_rate = None assert parts[0] in {'robosuite', 'robomimic', 'libero', 'metaworld', 'bridge', 'bridge-v2', 'd4rl'} self.dataset_type = parts[0] assert self.dataset_type == 'bridge-v2' or self.dataset_type in TASK_NAMES, f"ERROR! {self.dataset_type} is not in dataset type-to-task names dict! Task descr string is {task_descr_string}." assert self.dataset_type == 'bridge-v2' or parts[1] in TASK_NAMES[self.dataset_type] self.name = parts[1] assert parts[2] in ROBOT_NAMES self.robot = parts[2] def parse_comma_sep_param_value(comma_sep_param_value_str): param_values = [param_value.strip() for param_value in comma_sep_param_value_str.split(',')] return param_values def parse_tasks(task_spec_str, robot=None, global_max_traj=None): if task_spec_str is None or task_spec_str == 'None': return [], [] task_specs = parse_comma_sep_param_value(task_spec_str) descriptors = [] max_trajs = [] for task_spec in task_specs: if task_spec.startswith('(') and task_spec.endswith(')'): task_spec, max_traj = [part.strip('(): ') for part in task_spec.split(':')] max_trajs.append(int(max_traj)) else: max_trajs.append(global_max_traj) if robot is None: task = task_spec else: # --TARGET_ROBOT-- is a reserved token that can't be used to name an actual robot. task = task_spec.replace('--TARGET_ROBOT--', robot) assert task != task_spec, 'Invalid task directory string: {}. Needs to contain the \"--TARGET_ROBOT--\" token'.format(task) descriptors.append(TaskDescriptor(task)) return descriptors, max_trajs # reward_type can be 'native', 'negative', 'random', 'zero', and 'sparse'. 
def construct_rewards(original_rewards, successes, reward_type): if reward_type == 'sparse': rewards = np.asarray([sparse_reward(r) for r in successes]) elif reward_type == 'native': rewards = original_rewards elif reward_type == 'negative': rewards = -original_rewards elif reward_type == 'zero': rewards = np.zeros_like(original_rewards) elif reward_type == 'random': rewards = np.random.rand(*original_rewards.shape) else: raise NotImplementedError return rewards def set_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def construct_data_dir_path(cmdline_args): data_dir = cmdline_args['data_dir'].replace('--TARGET_ROBOT--', cmdline_args['robot']) data_dir = Path(data_dir).expanduser() return data_dir def setup_essentials(cmdline_args): set_seed(cmdline_args['seed']) data_shuffling_rng = np.random.RandomState(cmdline_args['seed']) log = setup_logging(cmdline_args) device = cmdline_args.get('device', 'cuda') log_to_wandb = cmdline_args.get('log_to_wandb', False) timer = Timer(log) camera_names = parse_comma_sep_param_value(cmdline_args['camera_names']) # Very important! This sets up observation preprocessing (such as resizing images to a desired size and swapping their format from HWC to CWH) # that will be done by the robomimic library to specified observation types when these observations are loaded from robomimic's h5py files or # generated by robosuite. if 'FULL_STATE' in camera_names: assert len(camera_names) == 1, "If FULL_STATE is present among camera names, it must be the only camera name." 
globals.full_state_mode = True else: globals.full_state_mode = False if not globals.full_state_mode: init_obs_preprocessing(camera_names, cmdline_args['image_size']) modalities_to_mask = parse_comma_sep_param_value(cmdline_args['modalities_to_mask']) data_dir = construct_data_dir_path(cmdline_args) common_env_metadata_dict = {'robosuite': None, 'metaworld': None, 'bridge': None} for modality in modalities_to_mask: assert modality in globals.MODALITIES return log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict def get_robot_dims(example_task, camera_names, image_size): if example_task.dataset_type == 'robosuite' or example_task.dataset_type == 'robomimic': env = RobosuiteEnv(example_task, use_normalized_reward=False, full_state_mode=globals.full_state_mode, camera_names=camera_names) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'metaworld': env = MetaWorldEnv(example_task, use_normalized_reward=False, full_state_mode=globals.full_state_mode, camera_name=camera_names[0]) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'd4rl': env = d4rlEnv(example_task, full_state_mode=globals.full_state_mode) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'bridge' or example_task.dataset_type == 'bridge-v2': obs_dims = (3, image_size, image_size) proprio_dim = 7 action_dim = 7 return obs_dims, proprio_dim, action_dim else: raise ValueError('Unknown dataset type: {}'.format(example_task.dataset_type)) def setup_model(cmdline_args, example_task, log, device, camera_names, modalities_to_mask, data_dir, bc_mode): obs_dims, proprio_dim, action_dim = get_robot_dims(example_task, camera_names, cmdline_args['image_size']) pretrained_state_dict = {} # Load pretrained weights, if applicable load_path = cmdline_args['load_path'] if load_path is not None: load_path = 
load_path.replace('--TARGET_ROBOT--', cmdline_args['robot']) log(f'Loading pretrained weights from {load_path}') pretrained_state_dict = torch.load(load_path) std_bounds = (cmdline_args['std_min'], cmdline_args['std_max']) tune_style_kwargs = {} tune_style_kwargs['image_encoder_tune_style'] = cmdline_args['image_encoder_tune_style'] if cmdline_args['model'] == 'PLEX': assert cmdline_args['obs_pred.K'] is not None assert cmdline_args['inv_d_pred.K'] is not None assert cmdline_args['obs_pred.K'] >= cmdline_args['inv_d_pred.K'] assert cmdline_args['obs_pred.K'] % cmdline_args['inv_d_pred.K'] == 0 obs_pred_gpt2_kwargs = dict( n_layer=cmdline_args['obs_pred.n_layer'], n_head=cmdline_args['obs_pred.n_head'], K=cmdline_args['obs_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) inv_d_pred_gpt2_kwargs = dict( n_layer=cmdline_args['inv_d_pred.n_layer'], n_head=cmdline_args['inv_d_pred.n_head'], K=cmdline_args['inv_d_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) model = PLEX( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], # The history length for this model is always the observation prediction model's history length: history_len=cmdline_args['obs_pred.K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, relative_position_encodings=cmdline_args['relative_position_encodings'], future_step=cmdline_args['future_step'], std_bounds=std_bounds, obs_pred_gpt2_kwargs=obs_pred_gpt2_kwargs, inv_d_pred_gpt2_kwargs=inv_d_pred_gpt2_kwargs, modalities_to_mask=modalities_to_mask, 
bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['obs_pred_transformer_tune_style'] = cmdline_args['obs_pred.transformer_tune_style'] tune_style_kwargs['inv_d_pred_transformer_tune_style'] = cmdline_args['inv_d_pred.transformer_tune_style'] elif cmdline_args['model'] == 'DT': # Configure the model gpt2_kwargs = dict( n_layer=cmdline_args['n_layer'], n_head=cmdline_args['n_head'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'], relative_position_encodings=cmdline_args['relative_position_encodings'] ) model = DecisionTransformer( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], history_len=cmdline_args['K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, gpt2_kwargs=gpt2_kwargs, std_bounds=std_bounds, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['transformer_tune_style'] = cmdline_args['transformer_tune_style'] elif cmdline_args['model'] == 'MLP':
model = MLPBCModel(
3
2023-11-06 09:38:09+00:00
16k
Giftify-Bot/Giftify-Bot
bot.py
[ { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[disc...
import asyncio import datetime import logging import os import pathlib import sys import traceback import aiohttp import asyncpg import discord import dotenv import jishaku import sentry_sdk import uvloop from logging.handlers import RotatingFileHandler from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from amari import AmariClient from discord.ext import commands from discord.utils import MISSING from discord.utils import _ColourFormatter as ColourFormatter from expiringdict import ExpiringDict from sentry_sdk.integrations.logging import LoggingIntegration from models.giveaway_settings import GuildConfig from models.giveaways import Giveaway from models.raffles import Raffle from utils.constants import ERROR_EMOJI, SUCCESS_EMOJI, WARN_EMOJI from utils.db import db_init from utils.tree import CommandTree from utils.view import ConfirmationView from cogs.timer_manager import TimerManager from models.donation_settings import GuildDonationConfig
13,510
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] 
donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {}
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] 
donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {}
raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300)
2
2023-11-09 15:00:15+00:00
16k
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test...
import os import csv import shutil import torch import torch.nn as nn import pickle from thop import profile from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.jsf import create_jsf_datasets from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params from tensorboardX import SummaryWriter
11,252
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event 
event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0]) params(train_loader, model, model_disc) ##### Continuing from previous training session ##### start_epoch = BASELINE_EPOCH if args.continue_weights is not None: if args.continue_epoch is None: print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif args.continue_epoch is not None: print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") return ##### Lr Scheduler vs static lr ##### if args.lr is None: if args.continue_epoch is None: init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START * len(args.gpu) lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if args.ce_smoothing is None: train_loss_func 
= eval_loss_func else:
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event 
event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0]) params(train_loader, model, model_disc) ##### Continuing from previous training session ##### start_epoch = BASELINE_EPOCH if args.continue_weights is not None: if args.continue_epoch is None: print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif args.continue_epoch is not None: print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") return ##### Lr Scheduler vs static lr ##### if args.lr is None: if args.continue_epoch is None: init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START * len(args.gpu) lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if args.ce_smoothing is None: train_loss_func 
= eval_loss_func else:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
4
2023-11-01 08:33:08+00:00
16k
tiendatnguyen-vision/Orbit-symmetrize
RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemen...
import math import logging import itertools import torch from functools import lru_cache as cache, reduce from collections import defaultdict from plum import dispatch from torch import nn from ..groups import Group from .linear_operator_base import LinearOperator from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \ LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product from .utils import orthogonal_complement, krylov_constraint_solve, get_device
11,277
Scalar = ScalarRep() def T(p, q=0, G=None): """ A convenience function for creating rank (p,q) tensors.""" return (V**p*V.t()**q)(G) def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, 
lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which 
achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps}
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. 
""" # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix.""" return densify(self.rho(M)) def drho_dense(self, A): """ A convenience function which returns drho(A) as a dense matrix.""" return densify(self.drho(A)) def constraint_matrix(self): """ Constructs the equivariance constrant matrix (lazily) by concatenating the constraints (ρ(hᵢ)-I) for i=1,...M and dρ(Aₖ) for k=1,..,D from the generators of the symmetry group. """ n = self.size() constraints = [] constraints.extend([lazify(self.rho(h)).to(self.G.device)-I(n, device=self.G.device) \ for h in self.G.discrete_generators]) constraints.extend([lazify(self.drho(A)).to(self.G.device) for A in self.G.lie_algebra]) return ConcatLazy(constraints) if constraints else lazify( torch.zeros((1, n), device=self.G.device)) solcache = {} def equivariant_basis(self): """ Computes the equivariant solution basis for the given representation of size N. Canonicalizes problems and caches solutions for reuse. 
Output [Q (N,r)] """ if self == Scalar: return torch.ones((1, 1), device=self.G.device) canon_rep, perm = self.canonicalize() invperm = torch.argsort(perm) if canon_rep not in self.solcache: logging.info("%r cache miss", canon_rep) logging.info("Solving basis for %r%s", self, f", for G={self.G}" if self.G is not None else "") C_lazy = canon_rep.constraint_matrix() if C_lazy.size(0)*C_lazy.size(1) > 3e7: # Too large to use SVD result = krylov_constraint_solve(C_lazy) else: C_dense = C_lazy.to_dense() result = orthogonal_complement(C_dense) self.solcache[canon_rep] = result return self.solcache[canon_rep][invperm] def equivariant_projector(self): """ Computes the (lazy) projection matrix P=QQᵀ that projects to the equivariant basis.""" Q = self.equivariant_basis() Q_lazy = lazify(Q) P = Q_lazy@Q_lazy.H() return P def concrete(self): """ Concreteness """ return isinstance(self.G, Group) def __add__(self, other): """ Direct sum (⊕) of representations. """ if isinstance(other, int): if other == 0: return self return self+other*Scalar if both_concrete(self, other): return SumRep(self, other) return DeferredSumRep(self, other) def __radd__(self, other): if isinstance(other, int): if other == 0: return self return other*Scalar+self return NotImplemented def __mul__(self, other): """ Tensor sum (⊗) of representations. """ return mul_reps(self, other) def __rmul__(self, other): return mul_reps(other, self) def __pow__(self, other): """ Iterated tensor product. """ assert isinstance(other, int), \ f"Power only supported for integers, not {type(other)}" assert other >= 0, f"Negative powers {other} not supported" return reduce(lambda a, b: a*b, other*[self], Scalar) def __rshift__(self, other): """ Linear maps from self -> other """ return other*self.t() def __lshift__(self, other): """ Linear maps from other -> self """ return self*other.t() def __lt__(self, other): """ less than defined to disambiguate ordering multiple different representations. 
Canonical ordering is determined first by Group, then by size, then by hash""" if other == Scalar: return False try: if self.G < other.G: return True if self.G > other.G: return False except (AttributeError, TypeError): pass if self.size() < other.size(): return True if self.size() > other.size(): return False return hash(self) < hash(other) # For sorting purposes only def t(self): """ Dual representation V*, rho*, drho*.""" if isinstance(self.G, Group) and self.G.is_orthogonal: return self return Dual(self) @dispatch def mul_reps(ra, rb: int): """ Product of a scalar and a representation. """ if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. """ def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, 
dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M) rhoinvt = rho.invt() if isinstance(rho, LinearOperator) else torch.linalg.inv(rho).t() return rhoinvt def drho(self, A): return -self.rep.drho(A).t() def __repr__(self): return repr(self.rep)+"*" def t(self): return self.rep def __eq__(self, other): return type(other) is type(self) and self.rep == other.rep def __hash__(self): return hash((type(self), self.rep)) def __lt__(self, other): if other == self.rep: return False return super().__lt__(other) def size(self): return self.rep.size() # Alias V or Vector for an instance of the Base representation of a group V = Vector = Base() # An instance of the Scalar representation, equivalent to V**0 Scalar = ScalarRep() def T(p, q=0, G=None): """ A convenience function for creating rank (p,q) tensors.""" return (V**p*V.t()**q)(G) def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? 
investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. 
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the 
constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps}
device = self.G.device if self.G is not None else get_device(list(Qs.values()))
15
2023-11-01 07:19:02+00:00
16k
mbreuss/consistency_trajectory_models_toy_task
ctm_train.py
[ { "identifier": "ConsistencyTrajectoryModel", "path": "ctm/ctm.py", "snippet": "class ConsistencyTrajectoryModel(nn.Module):\n\n def __init__(\n self, \n data_dim: int,\n cond_dim: int,\n sampler_type: str,\n sigma_data: float,\n sigma...
from tqdm import tqdm from ctm.ctm import ConsistencyTrajectoryModel from ctm.toy_tasks.data_generator import DataGenerator from ctm.visualization.vis_utils import plot_main_figure
11,771
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True cm = ConsistencyTrajectoryModel( data_dim=1, cond_dim=1, sampler_type='euler', lr=4e-4, sigma_data=0.5, sigma_min=0.05, solver_type='heun', sigma_max=2, n_discrete_t=18, conditioned=False, diffusion_lambda= 1, device=device, rho=7, ema_rate=0.999, use_teacher=use_pretraining, ) train_epochs = 2002 # chose one of the following toy tasks: 'three_gmm_1D' 'uneven_two_gmm_1D' 'two_gmm_1D' 'single_gaussian_1D' data_manager = DataGenerator('three_gmm_1D') samples, cond = data_manager.generate_samples(5000) samples = samples.reshape(-1, 1).to(device) pbar = tqdm(range(train_epochs)) # if not simultanous_training: # First pretrain the diffusion model and then train the consistency model if use_pretraining: for i in range(train_epochs): cond = cond.reshape(-1, 1).to(device) diff_loss = cm.diffusion_train_step(samples, cond, i, train_epochs) pbar.set_description(f"Step {i}, Diff Loss: {diff_loss:.8f}") pbar.update(1) cm.update_teacher_model()
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True cm = ConsistencyTrajectoryModel( data_dim=1, cond_dim=1, sampler_type='euler', lr=4e-4, sigma_data=0.5, sigma_min=0.05, solver_type='heun', sigma_max=2, n_discrete_t=18, conditioned=False, diffusion_lambda= 1, device=device, rho=7, ema_rate=0.999, use_teacher=use_pretraining, ) train_epochs = 2002 # chose one of the following toy tasks: 'three_gmm_1D' 'uneven_two_gmm_1D' 'two_gmm_1D' 'single_gaussian_1D' data_manager = DataGenerator('three_gmm_1D') samples, cond = data_manager.generate_samples(5000) samples = samples.reshape(-1, 1).to(device) pbar = tqdm(range(train_epochs)) # if not simultanous_training: # First pretrain the diffusion model and then train the consistency model if use_pretraining: for i in range(train_epochs): cond = cond.reshape(-1, 1).to(device) diff_loss = cm.diffusion_train_step(samples, cond, i, train_epochs) pbar.set_description(f"Step {i}, Diff Loss: {diff_loss:.8f}") pbar.update(1) cm.update_teacher_model()
plot_main_figure(
2
2023-11-07 15:30:11+00:00
16k
awslabs/optimizing-multitask-training-through-dynamic-pipelines
dynapipe/pipe/data_loader.py
[ { "identifier": "ProfileBasedCostModelWithRC", "path": "dynapipe/data_opt/cost_models.py", "snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n ...
import json import logging import multiprocessing as mp import os import time import traceback import torch import pickle from dataclasses import dataclass, field, fields from queue import Empty from typing import List, Optional from torch.utils.data import DataLoader as PTDataLoader from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC from dynapipe.data_opt.optimizer import DataAssignmentOptimizer from dynapipe.model import DynaPipeCluster, TransformerModelSpec from dynapipe.pipe.instructions import ( deserialize_list_of_eps, serialize_list_of_eps, ) from dynapipe.schedule_opt.execution_planner import ExecutionPlanner from dynapipe.utils.logger import create_logger, logger from .kv_redis import RedisKVStore from .utils import validate_device_assignment
13,074
def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None exec_planner: Optional[ExecutionPlanner] = None partition_method: Optional[str] = None token_based_partition_mbs: Optional[int] = None disable_tsp: Optional[bool] = None schedule_method: Optional[str] = None disable_mb_permutation: Optional[bool] = None disable_scheduler_memory_limit: Optional[bool] = None enable_packing: Optional[bool] = None n_layers_per_stage: Optional[int] = None assigned_iters_per_node: Optional[int] = None node_size: Optional[int] = None def __post_init__(self): if self.node_rank is None: raise RuntimeError("node_rank must be set at initialization.") if self.profile_path is None: raise RuntimeError("profile_path must be set at initialization.") @dataclass class DataloaderWorkerData(WorkerData): # required at initialization: dp_rank: Optional[int] = None pp_rank: Optional[int] = None virtual_pp_rank: Optional[int] = None # 
filled later in worker init: dp_size: Optional[int] = None pp_size: Optional[int] = None virtual_pp_size: Optional[int] = None def __post_init__(self): if self.dp_rank is None: raise RuntimeError("dp_rank must be set at initialization.") if self.pp_rank is None: raise RuntimeError("pp_rank must be set at initialization.") if self.virtual_pp_rank is None: raise RuntimeError( "virtual_pp_rank must be " "set at initialization." ) class KVStoreMetaKeys: DP_SIZE = "data_parallel_size" TP_SIZE = "tensor_parallel_size" PP_SIZE = "pipeline_parallel_size" VIRTUAL_PP_SIZE = "virtual_pipeline_parallel_size" ZERO_STAGE = "zero_stage" NODE_SIZE = "node_size" MODEL_SPEC = "model_spec" N_EXECS = "n_executors" N_LAYERS_PER_STAGE = "n_layers_per_stage" N_CHUNKS_PER_DEVICE = "n_chunks_per_device" DEVICE_MEMORY_LIMIT = "device_memory_limit" PARTITION_METHOD = "partition_method" TOKEN_BASED_PARTITION_MBS = "token_based_partition_mbs" DISABLE_TSP = "disable_tsp" SCHEDULE_METHOD = "schedule_method" DISABLE_MB_PERMUTATION = "disable_mb_permutation" DISABLE_SCHEDULER_MEMORY_LIMIT = "disable_scheduler_memory_limit" ENABLE_PACKING = "enable_packing" PER_MB_MEM_FRAC = "per_mb_memory_fraction" CLUSTER_SPEC = "cluster_spec" DEV_ASSIGNMENT = "device_assignment" KV_BUFFER_SIZE = "kv_buffer_size" ROUND_SEQLEN_MULT = "round_seqlen_multiple" ASSIGNED_ITER_PER_NODE = "assigned_iters_per_node" SEQLEN_OFFSET = "seqlen_offset" MODEL_TYPE = "model_type" # used outside dataloader N_ITERS = "n_iters" @dataclass class TrainingSpec: cm_path: str cluster_spec: DynaPipeCluster
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 MANAGER_PROCESS_TIMEOUT = 1 RECEIVER_PROCESS_TIMEOUT = 1 KVSTORE_TIMEOUT = 1800 # 30 minutes # ONLY USED FOR DEBUG PURPOSES DEBUG_USE_DUMMY_EP = False DEBUG_DUMP_EP_STATS = os.getenv( "DYNAPIPE_DEBUG_DUMP_EP_STATS", "False" ).lower() in ("true", "1", "t") DEBUG_DUMP_EP_PREFIX = os.environ.get("DYNAPIPE_DEBUG_DUMP_EP_PREFIX", None) if DEBUG_DUMP_EP_STATS and DEBUG_DUMP_EP_PREFIX is None: raise ValueError( "DYNAPIPE_DEBUG_DUMP_EP_PREFIX must be set if " "DYNAPIPE_DEBUG_DUMP_EP_STATS is set." ) _kvstore_handle = None def _init_kv_store(is_master, logger=None): host = os.environ.get("DYNAPIPE_KV_HOST", "localhost") port = os.environ.get("DYNAPIPE_KV_PORT", 29500) if logger is not None: logger.debug( "Init kv store, is_master: {}, host: {}, port: {}".format( is_master, host, port ) ) # kv_store = torch.distributed.TCPStore( # "127.0.0.1", # port, # is_master=is_master, # timeout=timedelta(seconds=KVSTORE_TIMEOUT), # ) kv_store = RedisKVStore(host, port, is_master=is_master) return kv_store, host, port def _checked_delete_key(kv_store: RedisKVStore, key: str, logger=None): result = kv_store.delete_key(key) if not result: raise RuntimeError( "Internal error: failed to delete key " "{}.".format(key) ) if logger is not None: logger.debug("Deleted key: {}".format(key)) def _get_from_shared_kv_store( kv_store: RedisKVStore, key: str, reader_idx: int, n_total_readers: int, decode: bool = True, logger=None, ): reader_count_key = key + "_rc" reader_ack_key = key + "_r{}_ack".format(reader_idx) # wait for reader ack if logger is not None: logger.debug("Waiting for reader ack key: {}".format(reader_ack_key)) kv_store.get(reader_ack_key) if logger is not None: logger.debug( "Got reader ack key: {}, waiting for data key: {}".format( reader_ack_key, key ) ) data = kv_store.get(key) if logger is not None: logger.debug("Removing reader ack key: {}".format(reader_ack_key)) # remove 
reader ack _checked_delete_key(kv_store, reader_ack_key, logger=logger) # get reader count reader_count = kv_store.add(reader_count_key, 1) if reader_count == n_total_readers: if logger is not None: logger.debug( "Last reader, reset reader count: {}".format(reader_count_key) ) # reset reader count result_readers = kv_store.add(reader_count_key, -n_total_readers) assert result_readers == 0 if logger is not None: logger.debug("Last reader, remove data key: {}".format(key)) # remove data key _checked_delete_key(kv_store, key, logger=logger) if logger is not None: logger.debug("Last reader, set ack key: {}".format(key + "_ack")) # set all reader ack keys keys_to_reset = [ key + "_r{}_ack".format(i) for i in range(n_total_readers) ] if logger is not None: logger.debug("Last reader, reset keys: {}".format(keys_to_reset)) for reset_key in keys_to_reset: val = kv_store.add(reset_key, 1) # make sure the key is set got_val = int(kv_store.get(reset_key).decode()) if not val == got_val: raise RuntimeError( "Failed to set reader ack key: {}".format(reset_key) ) if logger is not None: logger.debug("Set reader ack key: {}".format(reset_key)) # set data ack key kv_store.add(key + "_ack", 1) if decode: return data.decode() return data def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = 
fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None exec_planner: Optional[ExecutionPlanner] = None partition_method: Optional[str] = None token_based_partition_mbs: Optional[int] = None disable_tsp: Optional[bool] = None schedule_method: Optional[str] = None disable_mb_permutation: Optional[bool] = None disable_scheduler_memory_limit: Optional[bool] = None enable_packing: Optional[bool] = None n_layers_per_stage: Optional[int] = None assigned_iters_per_node: Optional[int] = None node_size: Optional[int] = None def __post_init__(self): if self.node_rank is None: raise RuntimeError("node_rank must be set at initialization.") if self.profile_path is None: raise RuntimeError("profile_path must be set at initialization.") @dataclass class DataloaderWorkerData(WorkerData): # required at initialization: dp_rank: Optional[int] = None pp_rank: Optional[int] = None virtual_pp_rank: Optional[int] = None # filled later in worker init: dp_size: Optional[int] = None pp_size: Optional[int] = None virtual_pp_size: Optional[int] = None def __post_init__(self): if self.dp_rank is None: raise RuntimeError("dp_rank must be set at initialization.") if self.pp_rank is None: raise RuntimeError("pp_rank must be set at initialization.") if self.virtual_pp_rank is None: raise RuntimeError( "virtual_pp_rank must be " "set at initialization." 
) class KVStoreMetaKeys: DP_SIZE = "data_parallel_size" TP_SIZE = "tensor_parallel_size" PP_SIZE = "pipeline_parallel_size" VIRTUAL_PP_SIZE = "virtual_pipeline_parallel_size" ZERO_STAGE = "zero_stage" NODE_SIZE = "node_size" MODEL_SPEC = "model_spec" N_EXECS = "n_executors" N_LAYERS_PER_STAGE = "n_layers_per_stage" N_CHUNKS_PER_DEVICE = "n_chunks_per_device" DEVICE_MEMORY_LIMIT = "device_memory_limit" PARTITION_METHOD = "partition_method" TOKEN_BASED_PARTITION_MBS = "token_based_partition_mbs" DISABLE_TSP = "disable_tsp" SCHEDULE_METHOD = "schedule_method" DISABLE_MB_PERMUTATION = "disable_mb_permutation" DISABLE_SCHEDULER_MEMORY_LIMIT = "disable_scheduler_memory_limit" ENABLE_PACKING = "enable_packing" PER_MB_MEM_FRAC = "per_mb_memory_fraction" CLUSTER_SPEC = "cluster_spec" DEV_ASSIGNMENT = "device_assignment" KV_BUFFER_SIZE = "kv_buffer_size" ROUND_SEQLEN_MULT = "round_seqlen_multiple" ASSIGNED_ITER_PER_NODE = "assigned_iters_per_node" SEQLEN_OFFSET = "seqlen_offset" MODEL_TYPE = "model_type" # used outside dataloader N_ITERS = "n_iters" @dataclass class TrainingSpec: cm_path: str cluster_spec: DynaPipeCluster
model_spec: TransformerModelSpec
3
2023-11-08 07:58:20+00:00
16k
SqueezeAILab/LLMCompiler
src/llm_compiler/llm_compiler.py
[ { "identifier": "AsyncStatsCallbackHandler", "path": "src/callbacks/callbacks.py", "snippet": "class AsyncStatsCallbackHandler(AsyncCallbackHandler):\n \"\"\"Collect useful stats about the run.\n Add more stats as needed.\"\"\"\n\n def __init__(self, stream: bool = False) -> None:\n supe...
import asyncio from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.llms import BaseLLM from langchain.prompts.base import StringPromptValue from src.callbacks.callbacks import AsyncStatsCallbackHandler from src.chains.chain import Chain from src.llm_compiler.constants import JOINNER_REPLAN from src.llm_compiler.planner import Planner from src.llm_compiler.task_fetching_unit import Task, TaskFetchingUnit from src.tools.base import StructuredTool, Tool from src.utils.logger_utils import log
11,119
class LLMCompilerAgent: """Self defined agent for LLM Compiler.""" def __init__(self, llm: BaseLLM) -> None: self.llm = llm async def arun(self, prompt: str, callbacks=None) -> str: return await self.llm.agenerate_prompt( prompts=[StringPromptValue(text=prompt)], stop=None, callbacks=callbacks, ) class LLMCompiler(Chain, extra="allow"): """LLMCompuler Engine.""" """The step container to use.""" input_key: str = "input" output_key: str = "output" def __init__( self, tools: Sequence[Union[Tool, StructuredTool]], planner_llm: BaseLLM, planner_example_prompt: str, planner_example_prompt_replan: Optional[str], planner_stop: Optional[list[str]], planner_stream: bool, agent_llm: BaseLLM, joinner_prompt: str, joinner_prompt_final: Optional[str], max_replans: int, benchmark: bool, **kwargs, ) -> None: """ Args: tools: List of tools to use. max_replans: Maximum number of replans to do. benchmark: Whether to collect benchmark stats. Planner Args: planner_llm: LLM to use for planning. planner_example_prompt: Example prompt for planning. planner_example_prompt_replan: Example prompt for replanning. Assign this if you want to use different example prompt for replanning. If not assigned, default to `planner_example_prompt`. planner_stop: Stop tokens for planning. planner_stream: Whether to stream the planning. Agent Args: agent_llm: LLM to use for agent. joinner_prompt: Prompt to use for joinner. joinner_prompt_final: Prompt to use for joinner at the final replanning iter. If not assigned, default to `joinner_prompt`. """ super().__init__(**kwargs) if not planner_example_prompt_replan: log( "Replan example prompt not specified, using the same prompt as the planner." ) planner_example_prompt_replan = planner_example_prompt
class LLMCompilerAgent: """Self defined agent for LLM Compiler.""" def __init__(self, llm: BaseLLM) -> None: self.llm = llm async def arun(self, prompt: str, callbacks=None) -> str: return await self.llm.agenerate_prompt( prompts=[StringPromptValue(text=prompt)], stop=None, callbacks=callbacks, ) class LLMCompiler(Chain, extra="allow"): """LLMCompuler Engine.""" """The step container to use.""" input_key: str = "input" output_key: str = "output" def __init__( self, tools: Sequence[Union[Tool, StructuredTool]], planner_llm: BaseLLM, planner_example_prompt: str, planner_example_prompt_replan: Optional[str], planner_stop: Optional[list[str]], planner_stream: bool, agent_llm: BaseLLM, joinner_prompt: str, joinner_prompt_final: Optional[str], max_replans: int, benchmark: bool, **kwargs, ) -> None: """ Args: tools: List of tools to use. max_replans: Maximum number of replans to do. benchmark: Whether to collect benchmark stats. Planner Args: planner_llm: LLM to use for planning. planner_example_prompt: Example prompt for planning. planner_example_prompt_replan: Example prompt for replanning. Assign this if you want to use different example prompt for replanning. If not assigned, default to `planner_example_prompt`. planner_stop: Stop tokens for planning. planner_stream: Whether to stream the planning. Agent Args: agent_llm: LLM to use for agent. joinner_prompt: Prompt to use for joinner. joinner_prompt_final: Prompt to use for joinner at the final replanning iter. If not assigned, default to `joinner_prompt`. """ super().__init__(**kwargs) if not planner_example_prompt_replan: log( "Replan example prompt not specified, using the same prompt as the planner." ) planner_example_prompt_replan = planner_example_prompt
self.planner = Planner(
3
2023-12-06 21:12:54+00:00
16k
bytedance/ImageDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12,369
opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), 
fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", 
to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not 
torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else 
self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, 
x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, 
noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) 
return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params 
= params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( 
size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
14
2023-12-13 21:09:37+00:00
16k
allenai/unified-io-2
t5x/models_test.py
[ { "identifier": "decoding", "path": "t5x/decoding.py", "snippet": "NEG_INF = np.array(-1.0e7)\nMIN_TEMPERATURE = np.array(1e-4)\nclass DecodingState:\nclass SamplingLoopState:\nclass BeamState:\ndef _is_tracer(value: Any):\ndef temperature_sample(\n inputs: jnp.ndarray,\n cache: Mapping[str, jnp.n...
import functools import flax import jax import jax.numpy as jnp import numpy as np import t5.data.tasks # pylint:disable=unused-import import tensorflow as tf from unittest import mock from absl import logging from absl.testing import absltest from absl.testing import parameterized from flax import traverse_util from t5x import decoding from t5x import models from t5x import partitioning from t5x import test_utils from t5x import trainer as trainer_lib from t5x import utils
12,466
shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids']) np.testing.assert_allclose(called_with[1]['encoder_segment_ids'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_segment_ids']) if 'decoder_segment_ids' in shapes: decoder_segment_ids = jnp.ones( shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids']) np.testing.assert_allclose(called_with[1]['decoder_segment_ids'], decoder_segment_ids) else: self.assertIsNone(called_with[1]['decoder_segment_ids']) self.assertFalse(called_with[1]['decode']) self.assertFalse(called_with[1]['enable_dropout']) @parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. 
dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. 
np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), }
# Copyright 2022 The T5X Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for t5x.models.""" # Parse absl flags test_srcdir and test_tmpdir. jax.config.parse_flags_with_absl() PartitionSpec = partitioning.PartitionSpec class ModelsTest(parameterized.TestCase): def test_remove_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([2, 4]) expected = [[3, 4, 5, 6, 7, 0, 0, 0], [10, 11, 0, 0, 0, 0, 0, 0]] remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) np.testing.assert_array_equal(actual, expected) def test_remove_prefix_zero_len_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([0, 0]) remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) # The expected output is the original sequences. 
np.testing.assert_array_equal(actual, sequences) BATCH_SIZE, ENCODER_LEN, MAX_DECODE_LEN, EMBED_DIM = 2, 3, 4, 5 class EncoderDecoderModelTest(parameterized.TestCase): @parameterized.named_parameters( dict( testcase_name='no_types', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types=None), dict( testcase_name='int32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32 }), dict( testcase_name='float32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_positions': [1, 512], 'decoder_positions': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_positions': jnp.int32, 'decoder_positions': jnp.int32 }), dict( testcase_name='float32_segment_ids', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_segment_ids': [1, 512], 'decoder_segment_ids': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_segment_ids': jnp.int32, 'decoder_segment_ids': jnp.int32 }), ) def test_get_initial_variables_shapes_and_types(self, shapes, types): mock_transformer = mock.Mock() mock_transformer.init.return_value = {'params': {}} mock_optimizer_def = mock.Mock() rng = mock.Mock() def mock_init(self): self.module = mock_transformer self.optimizer_def = mock_optimizer_def with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.get_initial_variables(rng, shapes, types) if types is None: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=jnp.float32) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=jnp.float32) else: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=types['encoder_input_tokens']) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens']) # 
Using `.assert_called_once_with` doesn't work because the simple # comparison it does for the array arguments fail (truth value of an array # is ambiguous). called_with = mock_transformer.init.call_args self.assertEqual(called_with[0][0], rng) np.testing.assert_allclose(called_with[0][1], encoder_input) np.testing.assert_allclose(called_with[0][2], decoder_input) np.testing.assert_allclose(called_with[0][3], decoder_input) if 'encoder_positions' in shapes: encoder_positions = jnp.ones( shapes['encoder_positions'], dtype=types['encoder_positions']) np.testing.assert_allclose(called_with[1]['encoder_positions'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_positions']) if 'decoder_positions' in shapes: decoder_positions = jnp.ones( shapes['decoder_positions'], dtype=types['decoder_positions']) np.testing.assert_allclose(called_with[1]['decoder_positions'], decoder_positions) else: self.assertIsNone(called_with[1]['decoder_positions']) if 'encoder_segment_ids' in shapes: encoder_positions = jnp.ones( shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids']) np.testing.assert_allclose(called_with[1]['encoder_segment_ids'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_segment_ids']) if 'decoder_segment_ids' in shapes: decoder_segment_ids = jnp.ones( shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids']) np.testing.assert_allclose(called_with[1]['decoder_segment_ids'], decoder_segment_ids) else: self.assertIsNone(called_with[1]['decoder_segment_ids']) self.assertFalse(called_with[1]['decode']) self.assertFalse(called_with[1]['enable_dropout']) @parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), 
dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. 
np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), }
model = test_utils.get_t5_test_model(vocab_size=50)
3
2023-12-12 20:23:33+00:00
16k
zju3dv/EasyVolcap
easyvolcap/utils/gl_utils.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary tha...
from typing import TYPE_CHECKING from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL from torch import nn from enum import Enum, auto from os.path import join, dirname from typing import Dict, Union, List from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized from easyvolcap.utils.console_utils import * from easyvolcap.utils.base_utils import dotdict from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.color_utils import cm_cpu_store from easyvolcap.utils.depth_utils import depth_curve_fn from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params from easyvolcap.utils.net_utils import typed, multi_gather, create_meshgrid, volume_rendering, raw2alpha, torch_dtype_to_numpy_dtype, load_pretrained, get_bounds from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager from OpenGL.GL import shaders from pytorch3d.structures import Pointclouds, Meshes from pytorch3d.structures import Pointclouds, Meshes from cuda import cudart from cuda import cudart from cuda import cudart from easyvolcap.engine.registry import call_from_cfg from easyvolcap.utils.gaussian_utils import GaussianModel from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart import os import glm import torch import ctypes import numpy as np import sys import OpenGL.GL as gl
12,206
if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling 
initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering
from __future__ import annotations if TYPE_CHECKING: # fmt: off # Environment variable messaging # Need to export EGL_DEVICE_ID before trying to import egl # And we need to consider the case when we're performing distributed training # from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS if 'easyvolcap.engine' in sys.modules and (sys.modules['easyvolcap.engine'].args.type != 'gui' or sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type == 'UnitySocketViewer'): # FIXME: GLOBAL VARIABLES try: except Exception as e: log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}')) os.environ['PYOPENGL_PLATFORM'] = '' try: except Exception as e: print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:') print(f'pip install git+https://github.com/mcfletch/pyopengl') raise e # fmt: on def linearize_depth(d, n: float, f: float): # 0-1 -> -1,1 # ndc -> view return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n)) def common_opengl_options(): # Use program point size gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) # Performs face culling gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) # Performs alpha trans testing gl.glEnable(gl.GL_ALPHA_TEST) # Performs z-buffer testing gl.glEnable(gl.GL_DEPTH_TEST) # gl.glDepthMask(gl.GL_TRUE) gl.glDepthFunc(gl.GL_LEQUAL) # gl.glDepthRange(-1.0, 1.0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) # Enable some masking tests gl.glEnable(gl.GL_SCISSOR_TEST) # Enable this to correctly render points # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310 gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory. # # The second argument specifies that our pixels will be in bytes. 
# gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) def load_shader_source(file: str = 'splat.frag'): # Ideally we can just specify the shader name instead of an variable if not exists(file): file = f'{dirname(__file__)}/shaders/{file}' if not exists(file): file = file.replace('shaders/', '') if not exists(file): raise RuntimeError(f'Shader file: {file} does not exist') with open(file, 'r') as f: return f.read() def use_gl_program(program: Union[shaders.ShaderProgram, dict]): if isinstance(program, dict): # Recompile the program if the user supplied sources program = dotdict(program) program = shaders.compileProgram( shaders.compileShader(program.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER), shaders.compileShader(program.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER) ) return gl.glUseProgram(program) class Mesh: class RenderType(Enum): POINTS = 1 LINES = 2 TRIS = 3 QUADS = 4 # TODO: Support quad loading STRIPS = 5 # Helper class to render a mesh on opengl # This implementation should only be used for debug visualization # Since no differentiable mechanism will be added # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly def __init__(self, verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict(), render_type: RenderType = RenderType.TRIS, # Misc info name: str = 'mesh', filename: str = '', visible: bool = True, # Render options shade_flat: bool = False, # smooth shading point_radius: float = 0.015, render_normal: bool = False, # Storage options store_device: str = 'cpu', compute_device: str = 'cuda', vert_sizes=[3, 3, 3], # pos + color + norm # Init options est_normal_thresh: int = 100000, # Ignore unused input **kwargs, ) -> None: super().__init__() self.name = name self.visible = 
visible self.render_type = render_type self.shade_flat = shade_flat self.point_radius = point_radius self.render_normal = render_normal self.store_device = store_device self.compute_device = compute_device self.vert_sizes = vert_sizes self.est_normal_thresh = est_normal_thresh # Uniform and program self.compile_shaders() self.uniforms = dotdict() # uniform values # Before initialization self.max_verts = 0 self.max_faces = 0 # OpenGL data if filename: self.load_from_file(filename) else: self.load_from_data(verts, faces, colors, normals, scalars) def compile_shaders(self): try: self.mesh_program = shaders.compileProgram( shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER) ) self.point_program = shaders.compileProgram( shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e @property def n_verts_bytes(self): return len(self.verts) * self.vert_size * self.verts.element_size() @property def n_faces_bytes(self): return len(self.faces) * self.face_size * self.faces.element_size() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts @property def faces_data(self): # a heavy copy operation faces = self.faces.ravel().numpy() # N, 3 faces = np.asarray(faces, dtype=np.uint32, order='C') return faces @property def face_size(self): return self.render_type.value @property def vert_size(self): return sum(self.vert_sizes) def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'): verts, faces, colors, normals, scalars = self.load_data_from_file(filename) self.load_from_data(verts, faces, colors, normals, scalars) 
def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'): self.name = os.path.split(filename)[-1] verts, faces, colors, normals, scalars = None, None, None, None, None verts, faces = load_mesh(filename, device=self.store_device) if not len(faces): verts, colors, normals, scalars = load_pts(filename) self.render_type = Mesh.RenderType.POINTS else: self.render_type = Mesh.RenderType(faces.shape[-1]) # use value return verts, faces, colors, normals, scalars def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()): # Data type conversion verts = torch.as_tensor(verts) # convert to tensor if input is of other types if verts.dtype == torch.float32: pass # supports this for now elif verts.dtype == torch.float16: pass # supports this for now else: verts = verts.type(torch.float) # convert to float32 if input is of higher precision gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT self.vert_gl_types = [gl_dtype] * len(self.vert_sizes) # Prepare main mesh data: vertices and faces self.verts = torch.as_tensor(verts, device=self.store_device) self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support # Prepare colors and normals if colors is not None: self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype) else: bounds = get_bounds(self.verts[None])[0] self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0]) if normals is not None: self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype) else: self.estimate_vertex_normals() # Prepare other scalars if scalars is not None: for k, v in scalars.items(): setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok? 
# Prepare OpenGL related buffer self.update_gl_buffers() def estimate_vertex_normals(self): def est_pcd_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: pcd = Pointclouds([self.verts]).to(self.compute_device) self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim def est_tri_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: mesh = Meshes([self.verts], [self.faces]).to(self.compute_device) self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim if not len(self.verts) > self.est_normal_thresh: if self.render_type == Mesh.RenderType.TRIS: est_tri_norms() elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms() else: # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping')) self.normals = self.verts else: # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation')) self.normals = self.verts def offscreen_render(self, eglctx: "eglContextManager", camera: Camera): eglctx.resize(camera.W, camera.H) self.render(camera) def render(self, camera: Camera): if not self.visible: return # For point rendering if self.render_type == Mesh.RenderType.POINTS: gl.glUseProgram(self.point_program) self.use_gl_program(self.point_program) else: gl.glUseProgram(self.mesh_program) self.use_gl_program(self.mesh_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) if self.render_type == Mesh.RenderType.POINTS: gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices elif self.render_type == Mesh.RenderType.LINES: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.TRIS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * 
self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.QUADS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.STRIPS: gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) else: raise NotImplementedError gl.glBindVertexArray(0) def use_gl_program(self, program: shaders.ShaderProgram): use_gl_program(program) self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat") self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius") self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal") self.uniforms.H = gl.glGetUniformLocation(program, "H") self.uniforms.W = gl.glGetUniformLocation(program, "W") self.uniforms.n = gl.glGetUniformLocation(program, "n") self.uniforms.f = gl.glGetUniformLocation(program, "f") self.uniforms.P = gl.glGetUniformLocation(program, "P") self.uniforms.K = gl.glGetUniformLocation(program, "K") self.uniforms.V = gl.glGetUniformLocation(program, "V") self.uniforms.M = gl.glGetUniformLocation(program, "M") def upload_gl_uniforms(self, camera: Camera): K = camera.gl_ixt # hold the reference V = camera.gl_ext # hold the reference M = glm.identity(mat4) P = K * V * M gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat) gl.glUniform1f(self.uniforms.point_radius, self.point_radius) gl.glUniform1i(self.uniforms.render_normal, self.render_normal) gl.glUniform1i(self.uniforms.H, camera.H) # o2w gl.glUniform1i(self.uniforms.W, camera.W) # o2w gl.glUniform1f(self.uniforms.n, camera.n) # o2w gl.glUniform1f(self.uniforms.f, camera.f) # o2w gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, 
glm.value_ptr(V)) # w2c gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w def update_gl_buffers(self): # Might be overwritten self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0, len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated if hasattr(self, 'verts'): gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference if hasattr(self, 'faces'): gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data) def resize_buffers(self, v: int = 0, f: int = 0): if v > self.max_verts or f > self.max_faces: if v > self.max_verts: self.max_verts = v if f > self.max_faces: self.max_faces = f self.init_gl_buffers(v, f) def init_gl_buffers(self, v: int = 0, f: int = 0): # This will only init the corresponding buffer object n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes # Housekeeping if hasattr(self, 'vao'): gl.glDeleteVertexArrays(1, [self.vao]) gl.glDeleteBuffers(2, [self.vbo, self.ebo]) self.vao = gl.glGenVertexArrays(1) self.vbo = gl.glGenBuffers(1) self.ebo = gl.glGenBuffers(1) gl.glBindVertexArray(self.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao cumsum = 0 for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)): gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float gl.glEnableVertexAttribArray(i) cumsum += s if n_faces_bytes > 0: # Some implementation has no faces, we 
dangerously ignore ebo here, assuming they will never be used gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) gl.glBindVertexArray(0) def render_imgui(self): pass class Quad(Mesh): # A shared texture for CUDA (pytorch) and OpenGL # Could be rendererd to screen using blitting or just drawing a quad def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip self.use_cudagl = use_cudagl self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings _type self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.update_gl_buffers() self.compile_shaders() self.max_H, self.max_W = H, W self.H, self.W = H, W self.compose = compose self.compose_power = compose_power self.init_texture() @property def n_faces_bytes(self): return 0 def use_gl_program(self, program: shaders.ShaderProgram): super().use_gl_program(program) self.uniforms.tex = gl.glGetUniformLocation(program, 'tex') gl.glUseProgram(self.quad_program) # use a different program gl.glUniform1i(self.uniforms.tex, 0) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_texture() def init_texture(self): if hasattr(self, 
'cu_tex'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex)) if hasattr(self, 'fbo'): gl.glDeleteFramebuffers(1, [self.fbo]) gl.glDeleteTextures(1, [self.tex]) # Init the texture to be blit onto the screen self.tex = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Init the framebuffer object if explicit blitting is used (slower than drawing quad) self.fbo = gl.glGenFramebuffers(1) old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo) if self.use_cudagl: if self.compose: # Both reading and writing of this resource is required flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone else: flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags)) def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0): assert self.use_cudagl, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad" w = w or self.W h = h or self.H if image.shape[-1] == 3: image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0)) if self.compose: """ Blit current framebuffer 
to this texture (self.tex) Read content of this texture into a cuda buffer Perform alpha blending based on the frame's alpha channel Copy the blended image back into the texture (self.tex) """ old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0 gl.glBlitFramebuffer(x, y, w, h, x, y, w, h, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old) buffer = torch.empty_like(image) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst w * 4 * buffer.element_size(), # dpitch cu_tex_arr, # src x * 4 * image.element_size(), # wOffset y, # hOffset w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes) h, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]]) alpha = image[..., -1:] / 255 image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int image[..., -1:] = buffer[..., -1:] + image[..., -1:] image = image.clip(0, 255) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr, x * 4 * image.element_size(), y, image.data_ptr(), w * 4 * image.element_size(), # differently sized w * 4 * image.element_size(), # rgba, should do a composition first h, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) def upload_to_texture(self, ptr: np.ndarray): H, W = ptr.shape[:2] H, W = min(self.H, H), min(self.W, W) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down? 
@property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def render(self, camera: Camera = None): self.draw() # no uploading needed def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ Upload the texture instead of the camera This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(x, y, w, h) gl.glScissor(x, y, w, h) # only render in this small region of the viewport gl.glUseProgram(self.quad_program) # use a different program gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) gl.glBindVertexArray(0) # Some house keepings gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0 gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped x, y, x + w, y + h, # the height is flipped gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old) class UQuad(Mesh): """ Responsible for initializing textures with a single value or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing) Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte """ def __init__(self): self.n_blit_values = 3 self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.max_verts, self.max_faces = 0, 0 self.verts = 
torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.compile_shaders() self.uniforms = dotdict() # uniform values self.use_gl_programs(self.quad_program) self.update_gl_buffers() @property def n_faces_bytes(self): return 0 @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def use_gl_programs(self, program: shaders.ShaderProgram): for i in range(self.n_blit_values): self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}') for i in range(self.n_blit_values): self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}') gl.glUseProgram(self.program) # use a different program for i in range(self.n_blit_values): self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}') gl.glUniform1i(self.uniforms[f'tex{i}'], i) def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]): for i, v in enumerate(values): v = vec4(v) # HACK: Hold the reference for this upload gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v)) # as float array for i, v in enumerate(use_texs): gl.glUniform1i(self.uniforms[f'use_tex{i}'], v) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): """ This function will render 'value' to the currently bound framebuffer, up to six outputs """ old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) gl.glUseProgram(self.quad_program) self.upload_gl_uniforms(values, use_texs) # should be a noop # Prepare to render to textures 
gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) # number of vertices gl.glBindVertexArray(old_vao) gl.glUseProgram(old_prog) class DQuad(UQuad): def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): old_function = gl.glGetIntegerv(gl.GL_DEPTH_FUNC) gl.glDepthFunc(gl.GL_ALWAYS) super().draw(values, use_texs) gl.glDepthFunc(old_function) def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F): # Prepare for write frame buffers color_buffer = gl.glGenTextures(1) depth_upper = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0)) # 16 * 4 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0) # location 0 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != 
gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, 
gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, 
**kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering
batch = to_cuda(camera.to_batch())
6
2023-12-07 08:53:42+00:00
16k
alibaba/animate-anything
utils/lora_handler.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns samp...
import os import torch import uuid from logging import warnings from typing import Union from types import SimpleNamespace from models.unet_3d_condition_mask import UNet3DConditionModel from transformers import CLIPTextModel from utils.convert_diffusers_to_original_ms_text_to_video import convert_unet_state_dict, convert_text_enc_state_dict_v20 from .lora import ( extract_lora_ups_down, inject_trainable_lora_extended, save_lora_weight, train_patch_pipe, monkeypatch_or_replace_lora, monkeypatch_or_replace_lora_extended ) from stable_lora.lora import ( activate_lora_train, add_lora_to, save_lora, load_lora, set_mode_group )
12,304
self.handle_lora_load(lora_file, lora_loader_args) else: print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...") except Exception as e: print(f"An error occured while loading a LoRA file: {e}") def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias): return_dict = lora_args.copy() if self.is_cloneofsimo_lora(): return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS) return_dict.update({ "model": model, "loras": self.get_lora_file_path(lora_path, model), "target_replace_module": replace_modules, "r": r }) if self.is_stable_lora(): KEYS = ['model', 'lora_path'] return_dict = filter_dict(return_dict, KEYS) return_dict.update({'model': model, 'lora_path': lora_path}) return return_dict def do_lora_injection( self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None, ): REPLACE_MODULES = replace_modules params = None negation = None is_injection_hybrid = False if self.is_cloneofsimo_lora(): is_injection_hybrid = True injector_args = lora_loader_args params, negation = self.lora_injector(**injector_args) for _up, _down in extract_lora_ups_down( model, target_replace_module=REPLACE_MODULES): if all(x is not None for x in [_up, _down]): print(f"Lora successfully injected into {model.__class__.__name__}.") break return params, negation, is_injection_hybrid if self.is_stable_lora(): injector_args = lora_args.copy() injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS) SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding] injector_args.update({ "model": model, "target_module": REPLACE_MODULES, "search_class": SEARCH_CLASS, "r": r, "dropout": dropout, "lora_bias": self.lora_bias }) activator = self.lora_injector(**injector_args) activator() return params, negation, is_injection_hybrid def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16): params = None negation = None lora_loader_args 
= self.get_lora_func_args( lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias ) if use_lora: params, negation, is_injection_hybrid = self.do_lora_injection( model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r ) if not is_injection_hybrid: self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args) params = model if params is None else params return params, negation def deactivate_lora_train(self, models, deactivate=True): """ Usage: Use before and after sampling previews. Currently only available for Stable LoRA. """ if self.is_stable_lora(): set_mode_group(models, not deactivate) def save_cloneofsimo_lora(self, model, save_path, step):
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def 
is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector: return add_lora_to assert "LoRA Version does not exist." def check_lora_ext(self, lora_file: str): return lora_file.endswith(tuple(LORA_FILE_TYPES)) def get_lora_file_path( self, lora_path: str, model: Union[UNet3DConditionModel, CLIPTextModel] ): if os.path.exists(lora_path): lora_filenames = [fns for fns in os.listdir(lora_path)] is_lora = self.check_lora_ext(lora_path) is_unet = isinstance(model, UNet3DConditionModel) is_text = isinstance(model, CLIPTextModel) idx = 0 if is_unet else 1 base_name = FILE_BASENAMES[idx] for lora_filename in lora_filenames: is_lora = self.check_lora_ext(lora_filename) if not is_lora: continue if base_name in lora_filename: return os.path.join(lora_path, lora_filename) return None def handle_lora_load(self, file_name:str, lora_loader_args: dict = None): self.lora_loader(**lora_loader_args) print(f"Successfully loaded LoRA from: {file_name}") def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,): try: lora_file = self.get_lora_file_path(lora_path, model) if lora_file is not None: lora_loader_args.update({"lora_path": lora_file}) self.handle_lora_load(lora_file, lora_loader_args) else: print(f"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...") except Exception as e: print(f"An error occured while loading a LoRA file: {e}") def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias): return_dict = lora_args.copy() if self.is_cloneofsimo_lora(): return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS) return_dict.update({ "model": model, "loras": self.get_lora_file_path(lora_path, model), "target_replace_module": replace_modules, "r": r }) if self.is_stable_lora(): KEYS = ['model', 'lora_path'] return_dict = filter_dict(return_dict, KEYS) return_dict.update({'model': model, 'lora_path': lora_path}) return return_dict def do_lora_injection( self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None, ): REPLACE_MODULES = replace_modules params = None negation = None is_injection_hybrid = False if self.is_cloneofsimo_lora(): is_injection_hybrid = True injector_args = lora_loader_args params, negation = self.lora_injector(**injector_args) for _up, _down in extract_lora_ups_down( model, target_replace_module=REPLACE_MODULES): if all(x is not None for x in [_up, _down]): print(f"Lora successfully injected into {model.__class__.__name__}.") break return params, negation, is_injection_hybrid if self.is_stable_lora(): injector_args = lora_args.copy() injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS) SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding] injector_args.update({ "model": model, "target_module": REPLACE_MODULES, "search_class": SEARCH_CLASS, "r": r, "dropout": dropout, "lora_bias": self.lora_bias }) activator = self.lora_injector(**injector_args) activator() return params, negation, is_injection_hybrid def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16): params = None negation = None lora_loader_args = self.get_lora_func_args( lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias ) if use_lora: 
params, negation, is_injection_hybrid = self.do_lora_injection( model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r ) if not is_injection_hybrid: self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args) params = model if params is None else params return params, negation def deactivate_lora_train(self, models, deactivate=True): """ Usage: Use before and after sampling previews. Currently only available for Stable LoRA. """ if self.is_stable_lora(): set_mode_group(models, not deactivate) def save_cloneofsimo_lora(self, model, save_path, step):
def save_lora(model, name, condition, replace_modules, step, save_path):
11
2023-12-07 08:26:29+00:00
16k
octo-models/octo
scripts/finetune.py
[ { "identifier": "make_single_dataset", "path": "octo/data/dataset.py", "snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwar...
import datetime import imp import os import flax import jax import optax import tensorflow as tf import tqdm import wandb from functools import partial from absl import app, flags, logging from flax.traverse_util import flatten_dict from jax.sharding import Mesh, NamedSharding, PartitionSpec from ml_collections import config_flags, ConfigDict from octo.data.dataset import make_single_dataset from octo.model.octo_model import OctoModel from octo.utils.jax_utils import initialize_compilation_cache from octo.utils.spec import ModuleSpec from octo.utils.train_callbacks import ( RolloutVisualizationCallback, SaveCallback, ValidationCallback, VisualizationCallback, ) from octo.utils.train_utils import ( check_config_diff, create_optimizer, format_name_with_config, merge_params, process_text, Timer, TrainState, ) from jax_smi import initialise_tracking # type: ignore
11,474
params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, 
action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) ### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs]
try: initialise_tracking() except ImportError: pass FLAGS = flags.FLAGS flags.DEFINE_string("name", "experiment", "Experiment name.") flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)") default_config_file = os.path.join( os.path.dirname(__file__), "configs/finetune_config.py" ) config_flags.DEFINE_config_file( "config", default_config_file, "File path to the training hyperparameter configuration.", lock_config=False, ) def main(_): initialize_compilation_cache() devices = jax.devices() logging.info( f""" Octo Finetuning Script ====================== Pretrained model: {FLAGS.config.pretrained_path} Finetuning Dataset: {FLAGS.config.dataset_kwargs.name} Data dir: {FLAGS.config.dataset_kwargs.data_dir} Task Modality: {FLAGS.config.modality} Finetuning Mode: {FLAGS.config.finetuning_mode} # Devices: {jax.device_count()} Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device) # Steps: {FLAGS.config.num_steps} """ ) ######### # # Setup Jax Data Parallelism # ######### assert ( FLAGS.config.batch_size % len(devices) == 0 ), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( 
FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() 
.shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, ) merged_params = merge_params(model.params, pretrained_model.params) model = model.replace(params=merged_params) del pretrained_model ######### # # Setup Optimizer and Train State # ######### params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. 
as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) 
### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs]
val_callback = ValidationCallback(
6
2023-12-13 09:58:56+00:00
16k
modelscope/richdreamer
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from einops import rearrange, repeat from functools import partial from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface,) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like,) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl,) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import (count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat,)
12,424
self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, 
linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", 
to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not 
torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else 
self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, 
x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, 
noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) 
return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params 
= params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( 
size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # 
self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior):
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
8
2023-12-06 07:53:11+00:00
16k
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/fcos.py
[ { "identifier": "batched_nms", "path": "annotator/oneformer/detectron2/layers/nms.py", "snippet": "def batched_nms(\r\n boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float\r\n):\r\n \"\"\"\r\n Same as torchvision.ops.boxes.batched_nms, but with float().\r\n \"...
import logging import torch from typing import List, Optional, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import nn from torch.nn import functional as F from annotator.oneformer.detectron2.layers import ShapeSpec, batched_nms from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance from annotator.oneformer.detectron2.utils.events import get_event_storage from ..anchor_generator import DefaultAnchorGenerator from ..backbone import Backbone from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss from .dense_detector import DenseDetector from .retinanet import RetinaNetHead
13,829
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["FCOS"] logger = logging.getLogger(__name__) class FCOS(DenseDetector): """ Implement FCOS in :paper:`fcos`. """ def __init__( self, *,
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["FCOS"] logger = logging.getLogger(__name__) class FCOS(DenseDetector): """ Implement FCOS in :paper:`fcos`. """ def __init__( self, *,
backbone: Backbone,
8
2023-12-05 02:51:53+00:00
16k
u2seg/U2Seg
detectron2/data/build.py
[ { "identifier": "configurable", "path": "detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function th...
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
12,599
logger.info("Making batched data loader with batch_size=%d", batch_size) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size) if aspect_ratio_grouping: assert drop_last, "Aspect ratio grouping will drop incomplete batches." data_loader = torchdata.DataLoader( dataset, num_workers=num_workers, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, **kwargs ) # yield individual mapped dict data_loader = AspectRatioGroupedDataset(data_loader, batch_size) if collate_fn is None: return data_loader return MapDataset(data_loader, collate_fn) else: return torchdata.DataLoader( dataset, batch_size=batch_size, drop_last=drop_last, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, worker_init_fn=worker_init_reset_seed, **kwargs ) def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]: repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR assert all(len(tup) == 2 for tup in repeat_factors) name_to_weight = defaultdict(lambda: 1, dict(repeat_factors)) # The sampling weights map should only contain datasets in train config unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN) assert not unrecognized, f"unrecognized datasets: {unrecognized}" logger = logging.getLogger(__name__) logger.info(f"Found repeat factors: {list(name_to_weight.items())}") # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`. 
return name_to_weight def _build_weighted_sampler(cfg, enable_category_balance=False): dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg) # OrderedDict to guarantee order of values() consistent with repeat factors dataset_name_to_dicts = OrderedDict( { name: get_detection_dataset_dicts( [name], filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) for name in cfg.DATASETS.TRAIN } ) # Repeat factor for every sample in the dataset repeat_factors = [ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN ] repeat_factors = list(itertools.chain.from_iterable(repeat_factors)) repeat_factors = torch.tensor(repeat_factors) logger = logging.getLogger(__name__) if enable_category_balance: """ 1. Calculate repeat factors using category frequency for each dataset and then merge them. 2. Element wise dot producting the dataset frequency repeat factors with the category frequency repeat factors gives the final repeat factors. 
""" category_repeat_factors = [ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD ) for dataset_dict in dataset_name_to_dicts.values() ] # flatten the category repeat factors from all datasets category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors)) category_repeat_factors = torch.tensor(category_repeat_factors) repeat_factors = torch.mul(category_repeat_factors, repeat_factors) repeat_factors = repeat_factors / torch.min(repeat_factors) logger.info( "Using WeightedCategoryTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) else: logger.info( "Using WeightedTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) sampler = RepeatFactorTrainingSampler(repeat_factors) return sampler def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) if mapper is None:
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. 
""" num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." 
return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. " f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. 
# We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. 
total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """ world_size = get_world_size() assert ( total_batch_size > 0 and total_batch_size % world_size == 0 ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( total_batch_size, world_size ) batch_size = total_batch_size // world_size logger = logging.getLogger(__name__) logger.info("Making batched data loader with batch_size=%d", batch_size) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size) if aspect_ratio_grouping: assert drop_last, "Aspect ratio grouping will drop incomplete batches." data_loader = torchdata.DataLoader( dataset, num_workers=num_workers, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, **kwargs ) # yield individual mapped dict data_loader = AspectRatioGroupedDataset(data_loader, batch_size) if collate_fn is None: return data_loader return MapDataset(data_loader, collate_fn) else: return torchdata.DataLoader( dataset, batch_size=batch_size, drop_last=drop_last, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, worker_init_fn=worker_init_reset_seed, **kwargs ) def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]: repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR assert all(len(tup) == 2 for tup in repeat_factors) name_to_weight = defaultdict(lambda: 1, dict(repeat_factors)) # The sampling weights map should only contain datasets in train config unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN) assert not 
unrecognized, f"unrecognized datasets: {unrecognized}" logger = logging.getLogger(__name__) logger.info(f"Found repeat factors: {list(name_to_weight.items())}") # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`. return name_to_weight def _build_weighted_sampler(cfg, enable_category_balance=False): dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg) # OrderedDict to guarantee order of values() consistent with repeat factors dataset_name_to_dicts = OrderedDict( { name: get_detection_dataset_dicts( [name], filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) for name in cfg.DATASETS.TRAIN } ) # Repeat factor for every sample in the dataset repeat_factors = [ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN ] repeat_factors = list(itertools.chain.from_iterable(repeat_factors)) repeat_factors = torch.tensor(repeat_factors) logger = logging.getLogger(__name__) if enable_category_balance: """ 1. Calculate repeat factors using category frequency for each dataset and then merge them. 2. Element wise dot producting the dataset frequency repeat factors with the category frequency repeat factors gives the final repeat factors. 
""" category_repeat_factors = [ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD ) for dataset_dict in dataset_name_to_dicts.values() ] # flatten the category repeat factors from all datasets category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors)) category_repeat_factors = torch.tensor(category_repeat_factors) repeat_factors = torch.mul(category_repeat_factors, repeat_factors) repeat_factors = repeat_factors / torch.min(repeat_factors) logger.info( "Using WeightedCategoryTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) else: logger.info( "Using WeightedTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) sampler = RepeatFactorTrainingSampler(repeat_factors) return sampler def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) if mapper is None:
mapper = DatasetMapper(cfg, True)
12
2023-12-05 01:13:31+00:00
16k
upfusion3d/upfusion
control_net/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "control_net/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from control_net.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from control_net.ldm.modules.ema import LitEma from control_net.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from control_net.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from control_net.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from control_net.ldm.models.diffusion.ddim import DDIMSampler
13,601
if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n 
c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if 
len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 
'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if 
self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, 
denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) 
self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, 
nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], 
self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) 
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = 
torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def perform_one_step_denoise(self, batch): """ Returns the recovered image after one step denoising. NOTE: This is a custom function added by BR! """ x, c = self.get_input(batch, self.first_stage_key) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() _, x0 = self.p_sample( x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=True, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None ) return x0, t def custom_forward(self, batch, **kwargs): """ Performs the usual forward pass but also returns the model output NOTE: This is a custom function added by BR! 
""" if self.parameterization != "eps": raise NotImplementedError x, c = self.get_input(batch, self.first_stage_key) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() # Below code was taken and adapted from p_losses noise = torch.randn_like(x) x_noisy = self.q_sample(x_start=x, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, c) loss_simple = self.get_loss(model_output, noise, mean=False).mean([1, 2, 3]) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t loss = self.l_simple_weight * loss.mean() x_recon = self.predict_start_from_noise(x, t=t, noise=model_output) return loss, x_recon def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] 
tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = 
score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * 
model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # 
in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = 
torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
11
2023-12-12 00:49:11+00:00
16k
nox-410/tvm.tl
python/tvm/topi/arm_cpu/conv2d_gemm.py
[ { "identifier": "get_const_tuple", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_tuple(in_tuple):\n \"\"\"Verifies input tuple is IntImm or Var, returns tuple of int or Var.\n\n Parameters\n ----------\n in_tuple : tuple of Expr\n The input.\n\n Returns\n -------...
import tvm from tvm.target import Target from tvm import te from tvm.topi import nn from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity from ..utils import get_const_tuple, get_const_int from ..nn.utils import get_pad_tuple from .tensor_intrin import ( gemm_4x4_int8_int8_int32, gemm_acc_4x4_int8_int8_int32, gemm_acc_nx16_int8_int8_int32, gemm_acc_2x2_int8_int8_int32, )
10,903
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, too-many-locals # pylint: disable=unused-argument, redefined-builtin """GEMM Convolution schedule on ARM""" def configure_knobs(cfg, M, K, target): """Configure auto-tuning knobs for the interleaved strategy""" x, y = cfg.axis(M // 4), cfg.axis(K // 16) cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]]) outer_loop, inner_loop = cfg.axis(4), cfg.axis(16) cfg.define_annotate( "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec" ) # Fallback configuration if cfg.is_fallback: cfg["reorder_gemm"] = ReorderEntity([0, 1]) cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"]) if not target.features.has_dotprod: cfg.define_knob("gemm_quantized_unroll", [True, False]) if cfg.is_fallback: cfg["gemm_quantized_unroll"] = OtherOptionEntity(False) # Compute function def compute_conv2d_gemm_without_weight_transform( cfg, data, B_interleaved_t, strides, padding, dilation, out_dtype, kernel_size, output_channels, interleave_A, ): """Compute conv2d by transforming the input, executing GEMM and transforming the output back""" batches, IH, IW, IC = get_const_tuple(data.shape) KH, KW = 
get_const_tuple(kernel_size) OC = get_const_int(output_channels) kernel_area = KH * KW if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = get_const_tuple(dilation) dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, too-many-locals # pylint: disable=unused-argument, redefined-builtin """GEMM Convolution schedule on ARM""" def configure_knobs(cfg, M, K, target): """Configure auto-tuning knobs for the interleaved strategy""" x, y = cfg.axis(M // 4), cfg.axis(K // 16) cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]]) outer_loop, inner_loop = cfg.axis(4), cfg.axis(16) cfg.define_annotate( "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec" ) # Fallback configuration if cfg.is_fallback: cfg["reorder_gemm"] = ReorderEntity([0, 1]) cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"]) if not target.features.has_dotprod: cfg.define_knob("gemm_quantized_unroll", [True, False]) if cfg.is_fallback: cfg["gemm_quantized_unroll"] = OtherOptionEntity(False) # Compute function def compute_conv2d_gemm_without_weight_transform( cfg, data, B_interleaved_t, strides, padding, dilation, out_dtype, kernel_size, output_channels, interleave_A, ): """Compute conv2d by transforming the input, executing GEMM and transforming the output back""" batches, IH, IW, IC = get_const_tuple(data.shape) KH, KW = 
get_const_tuple(kernel_size) OC = get_const_int(output_channels) kernel_area = KH * KW if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = get_const_tuple(dilation) dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
2
2023-12-14 02:37:47+00:00
16k
yolain/ComfyUI-Easy-Use
py/easyNodes.py
[ { "identifier": "advanced_encode", "path": "py/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(cli...
import sys import os import re import json import time import math import torch import psutil import random import datetime import comfy.sd import comfy.utils import numpy as np import folder_paths import comfy.samplers import comfy.controlnet import latent_preview import comfy.model_base import comfy.model_management from pathlib import Path from comfy.sd import CLIP, VAE from comfy.cli_args import args from urllib.request import urlopen from collections import defaultdict from PIL.PngImagePlugin import PngInfo from PIL import Image, ImageDraw, ImageFont from comfy.model_patcher import ModelPatcher from comfy_extras.chainner_models import model_loading from typing import Dict, List, Optional, Tuple, Union, Any from .adv_encode import advanced_encode, advanced_encode_XL from server import PromptServer from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask from comfy_extras.nodes_mask import LatentCompositeMasked from .config import BASE_RESOLUTIONS from .log import log_node_info, log_node_error, log_node_warn, log_node_success from .wildcards import process_with_loras, get_wildcard_list from comfy_extras.nodes_stable3d import camera_embeddings from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral from .dynthres_core import DynThresh
10,885
# ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) 
label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != 
folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder)
# 加载器 class easyLoader: def __init__(self): self.loaded_objects = { "ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)} "clip": defaultdict(tuple), "clip_vision": defaultdict(tuple), "bvae": defaultdict(tuple), "vae": defaultdict(object), "lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}} } self.memory_threshold = self.determine_memory_threshold(0.7) def clean_values(self, values: str): original_values = values.split("; ") cleaned_values = [] for value in original_values: cleaned_value = value.strip(';').strip() if cleaned_value == "": continue try: cleaned_value = int(cleaned_value) except ValueError: try: cleaned_value = float(cleaned_value) except ValueError: pass cleaned_values.append(cleaned_value) return cleaned_values def clear_unused_objects(self, desired_names: set, object_type: str): keys = set(self.loaded_objects[object_type].keys()) for key in keys - desired_names: del self.loaded_objects[object_type][key] def get_input_value(self, entry, key): val = entry["inputs"][key] return val if isinstance(val, str) else val[0] def process_pipe_loader(self, entry, desired_ckpt_names, desired_vae_names, desired_lora_names, desired_lora_settings, num_loras=3, suffix=""): for idx in range(1, num_loras + 1): lora_name_key = f"{suffix}lora{idx}_name" desired_lora_names.add(self.get_input_value(entry, lora_name_key)) setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name")) desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name")) def update_loaded_objects(self, prompt): desired_ckpt_names = set() desired_vae_names = set() desired_lora_names = set() desired_lora_settings = set() for entry in prompt.values(): class_type = entry["class_type"] if class_type == "easy a1111Loader" or class_type == "easy comfyLoader": 
lora_name = self.get_input_value(entry, "lora_name") desired_lora_names.add(lora_name) setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader': desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy XYInputs: ModelMergeBlocks": desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1")) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2")) vae_use = self.get_input_value(entry, "vae_use") if vae_use != 'Use Model 1' and vae_use != 'Use Model 2': desired_vae_names.add(vae_use) object_types = ["ckpt", "clip", "bvae", "vae", "lora"] for object_type in object_types: desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names self.clear_unused_objects(desired_names, object_type) def add_to_cache(self, obj_type, key, value): """ Add an item to the cache with the current timestamp. """ timestamped_value = (value, time.time()) self.loaded_objects[obj_type][key] = timestamped_value def determine_memory_threshold(self, percentage=0.8): """ Determines the memory threshold as a percentage of the total available memory. Args: - percentage (float): The fraction of total memory to use as the threshold. Should be a value between 0 and 1. Default is 0.8 (80%). Returns: - memory_threshold (int): Memory threshold in bytes. """ total_memory = psutil.virtual_memory().total memory_threshold = total_memory * percentage return memory_threshold def get_memory_usage(self): """ Returns the memory usage of the current process in bytes. 
""" process = psutil.Process(os.getpid()) return process.memory_info().rss def eviction_based_on_memory(self): """ Evicts objects from cache based on memory usage and priority. """ current_memory = self.get_memory_usage() if current_memory < self.memory_threshold: return eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"] for obj_type in eviction_order: if current_memory < self.memory_threshold: break # Sort items based on age (using the timestamp) items = list(self.loaded_objects[obj_type].items()) items.sort(key=lambda x: x[1][1]) # Sorting by timestamp for item in items: if current_memory < self.memory_threshold: break del self.loaded_objects[obj_type][item[0]] current_memory = self.get_memory_usage() def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False): cache_name = ckpt_name if config_name not in [None, "Default"]: cache_name = ckpt_name + "_" + config_name if cache_name in self.loaded_objects["ckpt"]: cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0] return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0] ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) output_clip = False if load_vision else True output_clipvision = True if load_vision else False if config_name not in [None, "Default"]: config_path = folder_paths.get_full_path("configs", config_name) loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) else: loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) self.add_to_cache("ckpt", cache_name, loaded_ckpt[0]) self.add_to_cache("bvae", cache_name, loaded_ckpt[2]) if load_vision: out = 
loaded_ckpt[3] self.add_to_cache("clip_vision", cache_name, out) else: out = loaded_ckpt[1] self.add_to_cache("clip", cache_name, loaded_ckpt[1]) self.eviction_based_on_memory() return loaded_ckpt[0], out, loaded_ckpt[2] def load_vae(self, vae_name): if vae_name in self.loaded_objects["vae"]: return self.loaded_objects["vae"][vae_name][0] vae_path = folder_paths.get_full_path("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) loaded_vae = comfy.sd.VAE(sd=sd) self.add_to_cache("vae", vae_name, loaded_vae) self.eviction_based_on_memory() return loaded_vae def load_lora(self, lora_name, model, clip, strength_model, strength_clip): model_hash = str(model)[44:-1] clip_hash = str(clip)[25:-1] unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}' if unique_id in self.loaded_objects["lora"] and unique_id in self.loaded_objects["lora"][lora_name]: return self.loaded_objects["lora"][unique_id][0] lora_path = folder_paths.get_full_path("loras", lora_name) lora = comfy.utils.load_torch_file(lora_path, safe_load=True) model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) self.add_to_cache("lora", unique_id, (model_lora, clip_lora)) self.eviction_based_on_memory() return model_lora, clip_lora # 采样器 class easySampler: def __init__(self): self.last_helds: dict[str, list] = { "results": [], "pipe_line": [], } @staticmethod def tensor2pil(image: torch.Tensor) -> Image.Image: """Convert a torch tensor to a PIL image.""" return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) @staticmethod def pil2tensor(image: Image.Image) -> torch.Tensor: """Convert a PIL image to a torch tensor.""" return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) @staticmethod def enforce_mul_of_64(d): d = int(d) if d <= 7: d = 8 leftover = d % 8 # 8 is the number of pixels per byte if leftover != 0: # if the number of pixels is not a multiple of 8 if (leftover < 4): # if the number of pixels is less than 4 d -= leftover # remove the leftover pixels else: # if the number of pixels is more than 4 d += 8 - leftover # add the leftover pixels return int(d) @staticmethod def safe_split(to_split: str, delimiter: str) -> List[str]: """Split the input string and return a list of non-empty parts.""" parts = to_split.split(delimiter) parts = [part for part in parts if part not in ('', ' ', ' ')] while len(parts) < 2: parts.append('None') return parts def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = 
previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent, disable_noise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: """Retrieve value by its associated ID.""" try: for value, id_ in self.last_helds[key]: if id_ == my_unique_id: return value except 
KeyError: return None def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" try: for i, (value, id_) in enumerate(self.last_helds[key]): if id_ == my_unique_id: self.last_helds[key][i] = (new_value, id_) return True self.last_helds[key].append((new_value, my_unique_id)) return False except KeyError: return False def upscale(self, samples, upscale_method, scale_by, crop): s = samples.copy() width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) if (width > MAX_RESOLUTION): width = MAX_RESOLUTION if (height > MAX_RESOLUTION): height = MAX_RESOLUTION s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) return (s,) def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: """Upscale the samples if the upscale_method is not set to 'None'.""" if upscale_method != "None": samples = self.upscale(samples, upscale_method, factor, crop)[0] return samples def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: """Initialize the state by either fetching the stored value or setting a default.""" value = self.get_value_by_id(key, my_unique_id) if value is not None: return value return default def get_output(self, pipe: dict,) -> Tuple: """Return a tuple of various elements fetched from the input pipe dictionary.""" return ( pipe, pipe.get("images"), pipe.get("model"), pipe.get("positive"), pipe.get("negative"), pipe.get("samples"), pipe.get("vae"), pipe.get("clip"), pipe.get("seed"), ) def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" return ( sdxl_pipe, sdxl_pipe.get("model"), sdxl_pipe.get("positive"), sdxl_pipe.get("negative"), 
sdxl_pipe.get("vae"), sdxl_pipe.get("refiner_model"), sdxl_pipe.get("refiner_positive"), sdxl_pipe.get("refiner_negative"), sdxl_pipe.get("refiner_vae"), sdxl_pipe.get("samples"), sdxl_pipe.get("clip"), sdxl_pipe.get("images"), sdxl_pipe.get("seed") ) # XY图表 class easyXYPlot: def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ') self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ') self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] self.grid_spacing = xyPlotData.get("grid_spacing") self.latent_id = 0 self.output_individuals = xyPlotData.get("output_individuals") self.x_label, self.y_label = [], [] self.max_width, self.max_height = 0, 0 self.latents_plot = [] self.image_list = [] self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 self.total = self.num_cols * self.num_rows self.num = 0 self.save_prefix = save_prefix self.image_output = image_output self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.my_unique_id = my_unique_id # Helper Functions @staticmethod def define_variable(plot_image_vars, value_type, value, index): plot_image_vars[value_type] = value if value_type in ["seed", "Seeds++ Batch"]: value_label = f"{value}" else: value_label = f"{value_type}: {value}" if "ControlNet" in value_type: if "," in value: line = value.split(',') value_label = f"{value_type}: {line[2]}" if value_type in ["ModelMergeBlocks"]: if ":" in value: line = value.split(':') value_label = f"{line[0]}" elif len(value) > 16: value_label = f"ModelMergeBlocks {index + 1}" else: value_label = f"MMB: {value}" if value_type in ["Positive Prompt S/R"]: value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt" if value_type in 
["Negative Prompt S/R"]: value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt" if value_type in ["steps", "cfg", "denoise", "clip_skip", "lora_model_strength", "lora_clip_strength"]: value_label = f"{value_type}: {value}" if value_type == "positive": value_label = f"pos prompt {index + 1}" elif value_type == "negative": value_label = f"neg prompt {index + 1}" return plot_image_vars, value_label @staticmethod def get_font(font_size): return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size) @staticmethod def update_label(label, value, num_items): if len(label) < num_items: return [*label, value] return label @staticmethod def rearrange_tensors(latent, num_cols, num_rows): new_latent = [] for i in range(num_rows): for j in range(num_cols): index = j * num_rows + i new_latent.append(latent[index]) return new_latent def calculate_background_dimensions(self): border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0 bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * ( self.y_type != "None") bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * ( self.x_type != "None") x_offset_initial = border_size if self.y_type != "None" else 0 y_offset = border_size if self.x_type != "None" else 0 return bg_width, bg_height, x_offset_initial, y_offset def adjust_font_size(self, text, initial_font_size, label_width): font = self.get_font(initial_font_size) text_width, _ = font.getsize(text) scaling_factor = 0.9 if text_width > (label_width * scaling_factor): return int(initial_font_size * (label_width / text_width) * scaling_factor) else: return initial_font_size def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): label_width = img.width if is_x_label else img.height # Adjust font size font_size = 
self.adjust_font_size(text, initial_font_size, label_width) font_size = min(max_font_size, font_size) # Ensure font isn't too large font_size = max(min_font_size, font_size) # Ensure font isn't too small label_height = int(font_size * 1.5) if is_x_label else font_size label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 255, 255, 0)) d = ImageDraw.Draw(label_bg) font = self.get_font(font_size) # Check if text will fit, if not insert ellipsis and reduce text if d.textsize(text, font=font)[0] > label_width: while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0: text = text[:-1] text = text + '...' # Compute text width and height for multi-line text text_lines = text.split('\n') text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) max_text_width = max(text_widths) total_text_height = sum(text_heights) # Compute position for each line of text lines_positions = [] current_y = 0 for line, line_width, line_height in zip(text_lines, text_widths, text_heights): text_x = (label_width - line_width) // 2 text_y = current_y + (label_height - total_text_height) // 2 current_y += line_height lines_positions.append((line, (text_x, text_y))) # Draw each line of text for line, (text_x, text_y) in lines_positions: d.text((text_x, text_y), line, fill='black', font=font) return label_bg def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise, x_value=None, y_value=None): model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None sampler_name, scheduler, denoise = None, None, None # 高级用法 if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced": if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch": seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value) if self.x_type == "Steps" or self.y_type == "Steps": 
steps = int(x_value) if self.x_type == "Steps" else int(y_value) if self.x_type == "StartStep" or self.y_type == "StartStep": start_step = int(x_value) if self.x_type == "StartStep" else int(y_value) if self.x_type == "EndStep" or self.y_type == "EndStep": last_step = int(x_value) if self.x_type == "EndStep" else int(y_value) if self.x_type == "CFG Scale" or self.y_type == "CFG Scale": cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value) if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler": sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler": scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Denoise" or self.y_type == "Denoise": denoise = float(x_value) if self.x_type == "Denoise" else float(y_value) # 模型叠加 if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks": ckpt_name_1, ckpt_name_2 = plot_image_vars['models'] model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1) model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2) xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value if ":" in xy_values: xy_line = xy_values.split(':') xy_values = xy_line[1] xy_arrs = xy_values.split(',') # ModelMergeBlocks if len(xy_arrs) == 3: input, middle, out = xy_arrs kwargs = { "input": input, "middle": middle, "out": out } elif len(xy_arrs) == 30: kwargs = {} kwargs["time_embed."] = xy_arrs[0] kwargs["label_emb."] = xy_arrs[1] for i in range(12): kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i] for i in range(3): kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i] for i in range(12): kwargs["output_blocks.{}.".format(i)] = xy_arrs[17+i] kwargs["out."] = xy_arrs[29] else: raise Exception("ModelMergeBlocks weight length error") 
default_ratio = next(iter(kwargs.values())) m = model1.clone() kp = model2.get_key_patches("diffusion_model.") for k in kp: ratio = float(default_ratio) k_unet = k[len("diffusion_model."):] last_arg_size = 0 for arg in kwargs: if k_unet.startswith(arg) and last_arg_size < len(arg): ratio = float(kwargs[arg]) last_arg_size = len(arg) m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) vae_use = plot_image_vars['vae_use'] clip = clip2 if vae_use == 'Use Model 2' else clip1 if vae_use == 'Use Model 2': vae = vae2 elif vae_use == 'Use Model 1': vae = vae1 else: (vae,) = VAELoader().load_vae(vae_use) model = m # 如果存在lora_stack叠加lora optional_lora_stack = plot_image_vars['lora_stack'] if optional_lora_stack is not None and optional_lora_stack != []: for lora in optional_lora_stack: lora_name = lora["lora_name"] model = model if model is not None else lora["model"] clip = clip if clip is not None else lora["clip"] lora_model_strength = lora["lora_model_strength"] lora_clip_strength = lora["lora_clip_strength"] if "lbw" in lora: lbw = lora["lbw"] lbw_a = lora["lbw_a"] lbw_b = lora["lbw_b"] cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire'] model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0, lbw_a, lbw_b, "", lbw) model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) # 处理clip clip = clip.clone() if plot_image_vars['clip_skip'] != 0: clip.clip_layer(plot_image_vars['clip_skip']) # 提示词 if "Positive" in self.x_type or "Positive" in self.y_type: if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R': positive = x_value if self.x_type == "Positive Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] positive, = cls().encode(clip, positive, 
"A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] positive, positive_pooled = advanced_encode(clip, positive, plot_image_vars['positive_token_normalization'], plot_image_vars[ 'positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] if "Negative" in self.x_type or "Negative" in self.y_type: if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R': negative = x_value if self.x_type == "Negative Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] negative, negative_pooled = advanced_encode(clip, negative, plot_image_vars['negative_token_normalization'], plot_image_vars[ 'negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, 
"loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], 
plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = 
easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None': log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...') return False else: return True def get_latent(self, samples): # Extract the 'samples' tensor from the dictionary latent_image_tensor = samples["samples"] # Split the tensor into individual image tensors image_tensors = torch.split(latent_image_tensor, 1, dim=0) # Create a list of dictionaries containing the individual image tensors latent_list = [{'samples': image} for image in image_tensors] # Set latent only to the first latent of batch if self.latent_id >= len(latent_list): log_node_warn(f'easy kSampler[{self.my_unique_id}]',f'The selected latent_id ({self.latent_id}) is out of range.') log_node_warn(f'easy kSampler[{self.my_unique_id}]', f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).') self.latent_id = len(latent_list) - 1 return latent_list[self.latent_id] def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise): for x_index, x_value in enumerate(self.x_values): plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) if self.y_type != 'None': for y_index, y_value in enumerate(self.y_values): plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) # ttNl(f'{CC.GREY}X: {x_value_label}, Y: 
{y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, 
expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != 
folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder)
log_node_success("",f"{folder} Created Successfully")
6
2023-12-10 07:02:36+00:00
16k
AIFSH/NativeDancer
nativedancer/third_part/detectron2/data/build.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`...
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from ..config import configurable from ..structures import BoxMode from ..utils.comm import get_world_size from ..utils.env import seed_all_rng from ..utils.file_io import PathManager from ..utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
10,887
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. 
""" num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file))
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. 
""" num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
4
2023-12-10 20:14:00+00:00
16k
ethanweber/nerfiller
nerfiller/nerf/nerfiller_pipeline.py
[ { "identifier": "RGBInpainter", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_...
from dataclasses import dataclass, field from typing import Literal, Optional, Type from torch.cuda.amp.grad_scaler import GradScaler from nerfstudio.pipelines.base_pipeline import VanillaPipelineConfig, VanillaPipeline from nerfiller.inpaint.rgb_inpainter import RGBInpainter, RGBInpainterXL from nerfiller.inpaint.depth_inpainter import DepthInpainter from nerfiller.inpaint.upscaler import Upscaler from nerfstudio.utils import profiler from nerfiller.utils.image_utils import ( get_inpainted_image_row, ) from nerfstudio.utils.rich_utils import Console from nerfstudio.utils.colormaps import apply_colormap, ColormapOptions from jaxtyping import Float from torch import Tensor from nerfiller.utils.mask_utils import downscale_mask from nerfiller.utils.typing import * from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes import torch import mediapy
12,702
edit_num: int = 40 """number of images to edit at a time""" edit_iters: int = 30001 """how many iterations until we stop making changes""" num_inference_steps: int = 20 multidiffusion_steps: int = 1 randomize_latents: bool = True randomize_within_grid: bool = False use_annealing: bool = True lower_bound: float = 0.4 """Lower bound for diffusion timesteps to use for image editing""" upper_bound: float = 1.0 """Upper bound for diffusion timesteps to use for image editing""" denoise_in_grid: bool = True dilate_iters: int = 5 dilate_kernel_size: int = 3 allow_camera_mismatch: bool = False tile_resolution: int = 256 upscale: bool = False inpaint_chunk_size: Optional[int] = None render_all_rate: int = 5000 reference_image: Path = Path("reference.png") lora_model_path: Optional[str] = None only_sample_from_latest: bool = True """Only sample rays from the latest inpaints.""" inpaint_method: str = "inpaint" """Strategy for inpainting a batch of images.""" text_guidance_scale: float = 0.0 image_guidance_scale: float = 1.5 inpaint_index_start: int = 0 """We will edit images starting from this index and onward.""" sds_loss_mult: float = 1.0 sds_guidance_mult: float = 10.0 sds_downscale_factor: int = 1 class NeRFillerPipeline(VanillaPipeline): """The pipeline for the NeRFiller method.""" def __init__( self, config: NeRFillerPipelineConfig, device: str, test_mode: Literal["test", "val", "inference"] = "val", world_size: int = 1, local_rank: int = 0, grad_scaler: Optional[GradScaler] = None, ): super().__init__(config, device, test_mode, world_size, local_rank, grad_scaler=grad_scaler) if test_mode != "val": # skip the rest of setup if we aren't going to train return self.grad_scaler = grad_scaler self.start_step = None self.num_train_images = len(self.datamanager.train_dataparser_outputs.image_filenames) self.load_training_modules() def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: self.trainer_base_dir = 
training_callback_attributes.trainer.base_dir return super().get_training_callbacks(training_callback_attributes) def load_state_dict(self, state_dict: Mapping[str, Any], strict: Optional[bool] = None): is_ddp_model_state = True model_state = {} for key, value in state_dict.items(): if key.startswith("_model."): # remove the "_model." prefix from key model_state[key[len("_model.") :]] = value # make sure that the "module." prefix comes from DDP, # rather than an attribute of the model named "module" if not key.startswith("_model.module."): is_ddp_model_state = False # remove "module." prefix added by DDP if is_ddp_model_state: model_state = {key[len("module.") :]: value for key, value in model_state.items()} pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")} if self.config.allow_camera_mismatch: # Don't set the weights for the appearance embedding # This sets the weights to be zero. key = "field.embedding_appearance.embedding.weight" model_state[key] = torch.zeros(self.model.field.embedding_appearance.embedding.weight.shape) try: self.model.load_state_dict(model_state, strict=True) except RuntimeError: if not strict: self.model.load_state_dict(model_state, strict=False) else: raise super().load_state_dict(pipeline_state, strict=False) def load_training_modules(self): """Load the modules.""" # RGB and depth inpainting rgb_inpaint_device = ( self.config.rgb_inpaint_device if self.config.rgb_inpaint_device is not None else self.device ) rgb_inpaint_vae_device = ( self.config.rgb_inpaint_vae_device if self.config.rgb_inpaint_vae_device is not None else rgb_inpaint_device ) if self.config.rgb_inpainter == "sd": self.rgb_inpainter = RGBInpainter( device=rgb_inpaint_device, vae_device=rgb_inpaint_vae_device, lora_model_path=self.config.lora_model_path, ) elif self.config.rgb_inpainter == "sdxl":
CONSOLE = Console() @dataclass class NeRFillerPipelineConfig(VanillaPipelineConfig): """The config for the NeRFiller pipeline.""" _target: Type = field(default_factory=lambda: NeRFillerPipeline) patch_size: int = 32 # inpaint args rgb_inpainter: str = "sd" rgb_inpaint_device: Optional[str] = "cuda:1" """device to put the rgb inpainting module on""" rgb_inpaint_vae_device: Optional[str] = None """device to put the vae inpainting module on. defaults to rgb inpaint device""" depth_inpaint_device: Optional[str] = "cuda:0" """device to put the depth inpainting module on""" upscale_device: Optional[str] = "cuda:0" """device to put the upscaler module on""" prompt: str = "highly detailed, 4K, hdr, sharp focus, image" """positive prompt for text-conditioned inpainting""" negative_prompt: str = "" """negative prompt for text-conditionied inpainting""" depth_method: Literal["zoedepth", "irondepth"] = "zoedepth" """which depth network to use for depth prediction or depth completion""" # sds use_sds: bool = False # du (dataset update) args use_du: bool = True """how often to update the dataset via inpainting. 
if 0, don't do dataset updating""" edit_rate: int = 1000 """how often to make an edit""" edit_num: int = 40 """number of images to edit at a time""" edit_iters: int = 30001 """how many iterations until we stop making changes""" num_inference_steps: int = 20 multidiffusion_steps: int = 1 randomize_latents: bool = True randomize_within_grid: bool = False use_annealing: bool = True lower_bound: float = 0.4 """Lower bound for diffusion timesteps to use for image editing""" upper_bound: float = 1.0 """Upper bound for diffusion timesteps to use for image editing""" denoise_in_grid: bool = True dilate_iters: int = 5 dilate_kernel_size: int = 3 allow_camera_mismatch: bool = False tile_resolution: int = 256 upscale: bool = False inpaint_chunk_size: Optional[int] = None render_all_rate: int = 5000 reference_image: Path = Path("reference.png") lora_model_path: Optional[str] = None only_sample_from_latest: bool = True """Only sample rays from the latest inpaints.""" inpaint_method: str = "inpaint" """Strategy for inpainting a batch of images.""" text_guidance_scale: float = 0.0 image_guidance_scale: float = 1.5 inpaint_index_start: int = 0 """We will edit images starting from this index and onward.""" sds_loss_mult: float = 1.0 sds_guidance_mult: float = 10.0 sds_downscale_factor: int = 1 class NeRFillerPipeline(VanillaPipeline): """The pipeline for the NeRFiller method.""" def __init__( self, config: NeRFillerPipelineConfig, device: str, test_mode: Literal["test", "val", "inference"] = "val", world_size: int = 1, local_rank: int = 0, grad_scaler: Optional[GradScaler] = None, ): super().__init__(config, device, test_mode, world_size, local_rank, grad_scaler=grad_scaler) if test_mode != "val": # skip the rest of setup if we aren't going to train return self.grad_scaler = grad_scaler self.start_step = None self.num_train_images = len(self.datamanager.train_dataparser_outputs.image_filenames) self.load_training_modules() def get_training_callbacks( self, 
training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: self.trainer_base_dir = training_callback_attributes.trainer.base_dir return super().get_training_callbacks(training_callback_attributes) def load_state_dict(self, state_dict: Mapping[str, Any], strict: Optional[bool] = None): is_ddp_model_state = True model_state = {} for key, value in state_dict.items(): if key.startswith("_model."): # remove the "_model." prefix from key model_state[key[len("_model.") :]] = value # make sure that the "module." prefix comes from DDP, # rather than an attribute of the model named "module" if not key.startswith("_model.module."): is_ddp_model_state = False # remove "module." prefix added by DDP if is_ddp_model_state: model_state = {key[len("module.") :]: value for key, value in model_state.items()} pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")} if self.config.allow_camera_mismatch: # Don't set the weights for the appearance embedding # This sets the weights to be zero. key = "field.embedding_appearance.embedding.weight" model_state[key] = torch.zeros(self.model.field.embedding_appearance.embedding.weight.shape) try: self.model.load_state_dict(model_state, strict=True) except RuntimeError: if not strict: self.model.load_state_dict(model_state, strict=False) else: raise super().load_state_dict(pipeline_state, strict=False) def load_training_modules(self): """Load the modules.""" # RGB and depth inpainting rgb_inpaint_device = ( self.config.rgb_inpaint_device if self.config.rgb_inpaint_device is not None else self.device ) rgb_inpaint_vae_device = ( self.config.rgb_inpaint_vae_device if self.config.rgb_inpaint_vae_device is not None else rgb_inpaint_device ) if self.config.rgb_inpainter == "sd": self.rgb_inpainter = RGBInpainter( device=rgb_inpaint_device, vae_device=rgb_inpaint_vae_device, lora_model_path=self.config.lora_model_path, ) elif self.config.rgb_inpainter == "sdxl":
self.rgb_inpainter = RGBInpainterXL(
1
2023-12-07 19:12:08+00:00
16k
nnanhuang/Customize-it-3D
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.rank_zero import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
11,602
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape)==len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while 
len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape)==len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] 
n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
15
2023-12-14 11:03:35+00:00
16k
mkang315/ASF-YOLO
segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import os import platform import sys import time import torch from pathlib import Path from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode
14,333
seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] 
# Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = 
str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = 
LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC 
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], 
cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
6
2023-12-10 14:18:29+00:00
16k
youngskkim/CRN
models/camera_radar_net_det.py
[ { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\...
import mmcv from models.base_bev_depth import BaseBEVDepth from layers.backbones.rvt_lss_fpn import RVTLSSFPN from layers.backbones.pts_backbone import PtsBackbone from layers.fuser.multimodal_feature_aggregation import MFAFuser from layers.heads.bev_depth_head_det import BEVDepthHead
11,670
logger = mmcv.utils.get_logger('mmdet') logger.setLevel('WARNING') __all__ = ['CameraRadarNetDet']
logger = mmcv.utils.get_logger('mmdet') logger.setLevel('WARNING') __all__ = ['CameraRadarNetDet']
class CameraRadarNetDet(BaseBEVDepth):
0
2023-12-06 14:57:49+00:00
16k
LIU-Yuxin/SyncMVD
src/pipeline.py
[ { "identifier": "UVProjection", "path": "src/renderer/project.py", "snippet": "class UVProjection():\n\tdef __init__(self, texture_size=96, render_size=64, sampling_mode=\"nearest\", channels=3, device=None):\n\t\tself.channels = channels\n\t\tself.device = device or torch.device(\"cpu\")\n\t\tself.ligh...
import os import numpy as np import math import random import torch import select import sys from typing import Any, Callable, Dict, List, Optional, Tuple, Union from PIL import Image from IPython.display import display from torch import functional as F from torch import nn from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode from diffusers import StableDiffusionControlNetPipeline, ControlNetModel from diffusers import DDPMScheduler, DDIMScheduler, UniPCMultistepScheduler from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.image_processor import VaeImageProcessor from diffusers.utils import ( BaseOutput, randn_tensor, numpy_to_pil, pt_to_pil, # make_image_grid, is_accelerate_available, is_accelerate_version, is_compiled_module, logging, randn_tensor, replace_example_docstring ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.models.attention_processor import Attention, AttentionProcessor from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from .renderer.project import UVProjection as UVP from .syncmvd.attention import SamplewiseAttnProcessor2_0, replace_attention_processors from .syncmvd.prompt import * from .syncmvd.step import step_tex from .utils import *
11,978
# 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = 
controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
# We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end):
if torch.cuda.is_available(): device = torch.device("cuda:0") torch.cuda.set_device(device) else: device = torch.device("cpu") # Background colors color_constants = {"black": [-1, -1, -1], "white": [1, 1, 1], "maroon": [0, -1, -1], "red": [1, -1, -1], "olive": [0, 0, -1], "yellow": [1, 1, -1], "green": [-1, 0, -1], "lime": [-1 ,1, -1], "teal": [-1, 0, 0], "aqua": [-1, 1, 1], "navy": [-1, -1, 0], "blue": [-1, -1, 1], "purple": [0, -1 , 0], "fuchsia": [1, -1, 1]} color_names = list(color_constants.keys()) # Used to generate depth or normal conditioning images @torch.no_grad() def get_conditioning_images(uvp, output_size, render_size=512, blur_filter=5, cond_type="normal"): verts, normals, depths, cos_maps, texels, fragments = uvp.render_geometry(image_size=render_size) masks = normals[...,3][:,None,...] masks = Resize((output_size//8,)*2, antialias=True)(masks) normals_transforms = Compose([ Resize((output_size,)*2, interpolation=InterpolationMode.BILINEAR, antialias=True), GaussianBlur(blur_filter, blur_filter//3+1)] ) if cond_type == "normal": view_normals = uvp.decode_view_normal(normals).permute(0,3,1,2) *2 - 1 conditional_images = normals_transforms(view_normals) # Some problem here, depth controlnet don't work when depth is normalized # But it do generate using the unnormalized form as below elif cond_type == "depth": view_depths = uvp.decode_normalized_depth(depths).permute(0,3,1,2) conditional_images = normals_transforms(view_depths) return conditional_images, masks # Revert time 0 background to time t to composite with time t foreground @torch.no_grad() def composite_rendered_view(scheduler, backgrounds, foregrounds, masks, t): composited_images = [] for i, (background, foreground, mask) in enumerate(zip(backgrounds, foregrounds, masks)): if t > 0: alphas_cumprod = scheduler.alphas_cumprod[t] noise = torch.normal(0, 1, background.shape, device=background.device) background = (1-alphas_cumprod) * noise + alphas_cumprod * background composited = foreground * 
mask + background * (1-mask) composited_images.append(composited) composited_tensor = torch.stack(composited_images) return composited_tensor # Split into micro-batches to use less memory in each unet prediction # But need more investigation on reducing memory usage # Assume it has no possitive effect and use a large "max_batch_size" to skip splitting def split_groups(attention_mask, max_batch_size, ref_view=[]): group_sets = [] group = set() ref_group = set() idx = 0 while idx < len(attention_mask): new_group = group | set([idx]) new_ref_group = (ref_group | set(attention_mask[idx] + ref_view)) - new_group if len(new_group) + len(new_ref_group) <= max_batch_size: group = new_group ref_group = new_ref_group idx += 1 else: assert len(group) != 0, "Cannot fit into a group" group_sets.append((group, ref_group)) group = set() ref_group = set() if len(group)>0: group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = 
False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group 
attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color self.uvp = UVP(texture_size=texture_size, render_size=latent_size, sampling_mode="nearest", channels=4, device=self._execution_device) if mesh_path.lower().endswith(".obj"): self.uvp.load_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) elif mesh_path.lower().endswith(".glb"): self.uvp.load_glb_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) else: assert False, "The mesh file format is not supported. Use .obj or .glb." self.uvp.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) self.uvp_rgb = UVP(texture_size=texture_rgb_size, render_size=render_rgb_size, sampling_mode="nearest", channels=3, device=self._execution_device) self.uvp_rgb.mesh = self.uvp.mesh.clone() self.uvp_rgb.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) _,_,_,cos_maps,_, _ = self.uvp_rgb.render_geometry() self.uvp_rgb.calculate_cos_angle_weights(cos_maps, fill=False) # Save some VRAM del _, cos_maps self.uvp.to("cpu") self.uvp_rgb.to("cpu") color_images = torch.FloatTensor([color_constants[name] for name in color_names]).reshape(-1,3,1,1).to(dtype=self.text_encoder.dtype, device=self._execution_device) color_images = torch.ones( (1,1,latent_size*8, latent_size*8), device=self._execution_device, dtype=self.text_encoder.dtype ) * color_images color_images *= ((0.5*color_images)+0.5) color_latents = encode_latents(self.vae, color_images) self.color_latents = {color[0]:color[1] for color in zip(color_names, [latent for latent in color_latents])} self.vae = self.vae.to("cpu") print("Done Initialization") ''' Modified from a StableDiffusion ControlNet pipeline Multi ControlNet not supported yet ''' @torch.no_grad() def __call__( self, prompt: 
str = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: str = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, return_dict: bool = False, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, max_batch_size=6, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_guess_mode: bool = False, controlnet_conditioning_scale: Union[float, List[float]] = 0.7, controlnet_conditioning_end_scale: Union[float, List[float]] = 0.9, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 0.99, guidance_rescale: float = 0.0, mesh_path: str = None, mesh_transform: dict = None, mesh_autouv = False, camera_azims=None, camera_centers=None, top_cameras=True, texture_size = 1536, render_rgb_size=1024, texture_rgb_size = 1024, multiview_diffusion_end=0.8, shuffle_background_change=0.4, shuffle_background_end=0.99, #0.4 use_directional_prompt=True, ref_attention_end=0.2, logging_config=None, cond_type="depth", ): # Setup pipeline settings self.initialize_pipeline( mesh_path=mesh_path, mesh_transform=mesh_transform, mesh_autouv=mesh_autouv, camera_azims=camera_azims, camera_centers=camera_centers, top_cameras=top_cameras, ref_views=[], latent_size=height//8, render_rgb_size=render_rgb_size, texture_size=texture_size, texture_rgb_size=texture_rgb_size, max_batch_size=max_batch_size, logging_config=logging_config ) num_timesteps = self.scheduler.config.num_train_timesteps initial_controlnet_conditioning_scale = controlnet_conditioning_scale log_interval = logging_config.get("log_interval", 10) view_fast_preview = logging_config.get("view_fast_preview", True) tex_fast_preview = logging_config.get("tex_fast_preview", True) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else 
self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): # mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 mult = 1 control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ control_guidance_end ] # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, torch.zeros((1,3,height,width), device=self._execution_device), callback_steps, negative_prompt, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, list): assert len(prompt) == 1 and len(negative_prompt) == 1, "Only implemented for 1 (negative) prompt" assert num_images_per_prompt == 1, "Only implemented for 1 image per-prompt" batch_size = len(self.uvp.cameras) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): # controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = controlnet_guess_mode or global_pool_conditions # 3. Encode input prompt prompt, negative_prompt = prepare_directional_prompt(prompt, negative_prompt) text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=text_encoder_lora_scale, ) negative_prompt_embeds, prompt_embeds = torch.chunk(prompt_embeds, 2) prompt_embed_dict = dict(zip(direction_names, [emb for emb in prompt_embeds])) negative_prompt_embed_dict = dict(zip(direction_names, [emb for emb in negative_prompt_embeds])) # (4. Prepare image) This pipeline use internal conditional images from Pytorch3D self.uvp.to(self._execution_device) conditioning_images, masks = get_conditioning_images(self.uvp, height, cond_type=cond_type) conditioning_images = conditioning_images.type(prompt_embeds.dtype) cond = (conditioning_images/2+0.5).permute(0,2,3,1).cpu().numpy() cond = np.concatenate([img for img in cond], axis=1) numpy_to_pil(cond)[0].save(f"{self.intermediate_dir}/cond.jpg") # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, None, ) latent_tex = self.uvp.set_noise_texture() noise_views = self.uvp.render_textured_views() foregrounds = [view[:-1] for view in noise_views] masks = [view[-1:] for view in noise_views] composited_tensor = composite_rendered_view(self.scheduler, latents, foregrounds, masks, timesteps[0]+1) latents = composited_tensor.type(latents.dtype) self.uvp.to("cpu") # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], 
device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
# We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end):
replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1)
2
2023-12-09 03:27:58+00:00
16k