Dataset structure, as reported by the dataset viewer (for string columns the range is the minimum and maximum value length; for integer columns it is the minimum and maximum value):

| Column | Type | Range / values |
| --- | --- | --- |
| repo_name | string | lengths 7 to 71 |
| file_path | string | lengths 5 to 118 |
| context | list | retrieved cross-file snippets |
| import_statement | string | lengths 45 to 12.5k |
| token_num | int64 | 641 to 99.4k |
| cropped_code | string | lengths 44 to 17k |
| all_code | string | lengths 43 to 754k |
| next_line | string | lengths 2 to 330 |
| gold_snippet_index | int64 | 0 to 68 |
| created_at | string | length 25 (fixed) |
| level | string | categorical, 9 classes |
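This dump never states the dataset's Hub id, so the loader below uses a hypothetical placeholder (`org/repo-code-completion`); it is a minimal sketch of how rows with this schema could be loaded and inspected with the `datasets` library.

```python
# Minimal sketch: load the dataset and inspect one row.
# NOTE: "org/repo-code-completion" is a hypothetical placeholder id,
# not the real Hub id of this dataset.
from datasets import load_dataset

ds = load_dataset("org/repo-code-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"], row["token_num"])
print("retrieved context snippets:", len(row["context"]))

# gold_snippet_index points at the context entry that contains the
# definition actually needed to complete the file.
gold = row["context"][row["gold_snippet_index"]]
print("gold snippet:", gold["identifier"], "from", gold["path"])
print("target:", row["next_line"])
```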
**Example 1**

- repo_name: `lumina-test/lumina`
- file_path: `lumina/e2e_test/test_gbn.py`

context (list of retrieved cross-file snippets; only the first entry is shown, truncated by the viewer):

```json
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n ...
```

import_statement:

```python
import argparse, os, math, glob, logging, time

import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
```

- token_num: 15,247
cropped_code (the crop starts mid-way through `analyze_retrans_latency`):

```python
            trigger = nack.get_trigger()
            if trigger == TRIGGER_OOS:
                logger.info("\t\t Out of sequence (OOS) triggered retransmission")
                logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\
                            (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq()))
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
            elif trigger == TRIGGER_TIMEOUT:
                logger.info("\t\t Timeout triggered retransmission")
                logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\
                            (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq()))
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
            else:
                logger.error("\t\t NACK READ request should be triggered by either OOS or timeout")
    else:
        # For other verbs, we can only find a NACK in case of out of sequence arriving packets
        if latency_measurement.get_nack(pkt) != None:
            # Out of sequence/NACK triggered retransmission
            next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt)
            nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt)
            nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
            logger.info("\t\t Out of sequence (OOS) triggered retransmission")
            logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
            logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6))
            logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6))
            logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6))
        elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None:
            nack = latency_measurement.get_qp_first_nack_before_retrans(pkt)
            logger.info("\t\t Out of sequence (OOS) triggered retransmission")
            logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" %\
                        (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq()))
            logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
        else:
            logger.info("\t\t Timeout triggered retransmission")
            logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))

def verify_results(orchestrator):
    """ Verify the experiment results

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    result_dir = orchestrator.result_path
    num_repeats = orchestrator.num_repeats
    mtu = orchestrator.traffic_conf['mtu']
    msg_size = orchestrator.traffic_conf['message-size']
    num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp']
    aggregate_pcap_filename = orchestrator.aggregate_pcap_filename
    port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'],
                'responder': orchestrator.responder.conf['nic']['switch-port'],
                'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'],
                'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']}

    requester_ip_list = orchestrator.get_requester_ip_list()
    responder_ip_list = orchestrator.get_responder_ip_list()

    for iter in range(num_repeats):
        iter = str(iter)
        result_logger = logging.getLogger('Analysis iter %s' % (iter))
        result_logger.handlers.clear()
        config_file_handler(logger=result_logger,
                            log_file=os.path.join(result_dir, iter, RESULT_FILENAME),
                            no_format=True)
        result_logger.info("=" * 100)
        result_logger.info("Iteration %s" % iter)

        switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR,
                                           switch.SWITCH_MESSAGE_SNAPSHOT)
        switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR,
                                             switch.SWITCH_STATE_SNAPSHOT)
        pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR,
                                     aggregate_pcap_filename)
        requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR,
                                               host.REQ_START_COUNTER_FILE_NAME)
        requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR,
                                                host.REQ_FINISH_COUNTER_FILE_NAME)
        responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR,
                                               host.RSP_START_COUNTER_FILE_NAME)
        responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR,
                                                host.RSP_FINISH_COUNTER_FILE_NAME)

        switch_counter = SwitchCounter(switch_state_snapshot, port_map)
        if orchestrator.requester.is_mlnx_nic():
            requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish)
        elif orchestrator.requester.is_intel_nic():
            requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish)
        else:
            logging.error("Unknown NIC vendor for RDMA requester.")
            requester_counter = None

        if orchestrator.responder.is_mlnx_nic():
            responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish)
        elif orchestrator.responder.is_intel_nic():
            responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish)
        else:
            logging.error("Unknown NIC vendor for RDMA responder.")
            responder_counter = None

        qp_info_list = get_qp_info_list(switch_msg_snapshot)
```
all_code (the full file; its tail repeats the cropped_code block above verbatim):

```python
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_gbn.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3

def setup_root_logger(orchestrator):
    """ Setup the root logger for the test

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
    config_file_handler(logger=root_logger,
                        log_file=os.path.join(orchestrator.result_path, LOG_FILENAME),
                        no_format=False)

def run_traffic(orchestrator):
    """ Run the traffic and collect the results

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        bool: True if the experiment is successful, False otherwise
    """
    orchestrator.rm_old_files()
    if orchestrator.sync_and_compile() == False:
        logging.error("Failed to sync and compile the code")
        sys.exit(-1)
    logging.info("Sync and compile completed")

    if orchestrator.generate_switch_config_file() == False:
        logging.error("Failed to generate switch configuration file")
        sys.exit(-1)

    num_repeats = orchestrator.get_num_repeats()

    for i in range(num_repeats):
        logging.info("=" * 100)
        nb_retry = 0
        iter_result = False

        while nb_retry < MAX_NB_EXP_RETRIES:
            if orchestrator.run_experiment() == False:
                logging.error("Iteration %d: Failed to complete experiment" % i)
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                orchestrator.clean_up()
                time.sleep(5)
                continue

            logging.info("Iteration %d: Completed experiment" % i)
            try:
                orchestrator.clean_up()
                orchestrator.fetch_results(i)
                logging.info("Iteration %d: Fetch experiment results" % i)
                orchestrator.merge_traces(i)
                logging.info("Iteration %d: Merge the pcap files" % i)
            except:
                logging.error("Iteration %d: Result collection failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            if orchestrator.check_integrity(i) == False:
                logging.error("Iteration %d: Integrity check failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            iter_result = True
            break

        if iter_result is False:
            logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry))
            return False

    return True

def analyze_retrans_latency(pkt, latency_measurement, is_read, logger):
    """ Analyze the retransmission latency breakdown for an undelivered packet

    Args:
        pkt (Packet object): The undelivered packet
        latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown
        is_read (bool): If we use RDMA READ in this experiment
        logger (logging.Logger): A logger object

    Returns:
        N/A
    """
    # All the undelivered packets should be retransmitted in our test cases
    if latency_measurement.get_retransmit_pkt(pkt) == None:
        logger.error("\t\t No retransmit packet found for this packet")
        logger.error("\t\t It is possible that this undelivered packet is a redundant transmission")
        return

    retrans_latency = latency_measurement.get_retransmit_latency(pkt)
    if is_read == True:
        # For RDMA READ, we should always find a NACK READ request that triggers retransmission
        nack = latency_measurement.get_nack(pkt)
        if nack is not None:
            trigger = nack.get_trigger()
            if trigger == TRIGGER_OOS:
                next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt)
                nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt)
                nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
                logger.info("\t\t Out of sequence (OOS) triggered retransmission")
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
                logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6))
                logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6))
                logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
            elif trigger == TRIGGER_TIMEOUT:
                nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
                logger.info("\t\t Timeout triggered retransmission")
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
                logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
            else:
                logger.error("\t\t NACK READ request should be triggered by either OOS or timeout")
        else:
            nack = latency_measurement.get_qp_first_nack_before_retrans(pkt)
            if nack is None:
                logger.error("\t\t Cannot find the NACK READ request to recover this lost packet")
                return
            # The remainder of all_code repeats the cropped_code block above
            # verbatim, from `trigger = nack.get_trigger()` through
            # `qp_info_list = get_qp_info_list(switch_msg_snapshot)`.
```
next_line:

```python
packet_list = get_packet_list(pcap_filename)
```

- gold_snippet_index: 5
- created_at: 2023-12-09 08:21:14+00:00
- level: 24k
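A row like the one above defines a next-line completion task: `cropped_code`, together with `import_statement` and the retrieved `context` snippets, forms the prompt, and `next_line` is the target, with `gold_snippet_index` marking the snippet whose definition the completion presumably depends on (here, likely `get_packet_list`). The sketch below is one plausible way to assemble such a prompt and score predictions by exact match; the prompt layout and the metric are illustrative assumptions, and `generate_next_line` is a placeholder for whatever completion model is under test.

```python
# Hypothetical harness: build a prompt from one row and score the model's
# single-line prediction by whitespace-insensitive exact match.
def build_prompt(row: dict) -> str:
    # Cross-file snippets first, then the file's imports, then the cropped body.
    snippets = "\n".join(c["snippet"] for c in row["context"])
    return f"{snippets}\n\n{row['import_statement']}\n\n{row['cropped_code']}"

def exact_match(pred: str, gold: str) -> bool:
    return pred.strip() == gold.strip()

def evaluate_rows(rows, generate_next_line) -> float:
    hits = sum(
        exact_match(generate_next_line(build_prompt(row)), row["next_line"])
        for row in rows
    )
    return hits / len(rows)
```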
**Example 2**

- repo_name: `ebb-earl-co/tidal-wave`
- file_path: `tidal_wave/main.py`

context (only the first entry is shown, truncated by the viewer):

```json
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Andr...
```

import_statement:

```python
from contextlib import closing
from pathlib import Path
from typing import Optional, Union

from .login import login, AudioFormat, LogLevel
from .album import Album
from .artist import Artist
from .mix import Mix
from .playlist import Playlist
from .track import Track
from .video import Video
from .models import (
    match_tidal_url,
    TidalAlbum,
    TidalArtist,
    TidalMix,
    TidalPlaylist,
    TidalTrack,
    TidalVideo,
)
from platformdirs import user_music_path
from typing_extensions import Annotated

import logging
import typer
```

- token_num: 17,473
cropped_code:

```python
app = typer.Typer()

@app.command()
def main(
    tidal_url: Annotated[
        str,
        typer.Argument(
            help="The Tidal album or artist or mix or playlist or track or video to download"
        ),
    ],
    audio_format: Annotated[
        AudioFormat, typer.Option(case_sensitive=False)
    ] = AudioFormat.lossless.value,
    output_directory: Annotated[
        Path,
        typer.Argument(
            help="The parent directory under which directory(ies) of files will be written"
        ),
    ] = user_music_path(),
    loglevel: Annotated[
        LogLevel, typer.Option(case_sensitive=False)
    ] = LogLevel.info.value,
    include_eps_singles: Annotated[
        bool,
        typer.Option(
            "--include-eps-singles",
            help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums",
        ),
    ] = False,
):
    logging.basicConfig(
        format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
        datefmt="%Y-%m-%d:%H:%M:%S",
        level=logging.getLevelName(loglevel.value),
    )
    logger = logging.getLogger(__name__)

    tidal_resource: Optional[
        Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo]
    ] = match_tidal_url(tidal_url)
    if tidal_resource is None:
        logger.critical(
            f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL"
        )
        raise typer.Exit(code=1)

    s, audio_format = login(audio_format=audio_format)
    if s is None:
        raise typer.Exit(code=1)

    with closing(s) as session:
        if isinstance(tidal_resource, TidalTrack):
            track = Track(track_id=tidal_resource.tidal_id)
            track.get(
                session=session, audio_format=audio_format, out_dir=output_directory
            )
            if loglevel == LogLevel.debug:
                track.dump()
            raise typer.Exit(code=0)
        elif isinstance(tidal_resource, TidalAlbum):
            album = Album(album_id=tidal_resource.tidal_id)
            album.get(
                session=session, audio_format=audio_format, out_dir=output_directory
            )
            if loglevel == LogLevel.debug:
                album.dump()
            raise typer.Exit(code=0)
        elif isinstance(tidal_resource, TidalArtist):
            artist = Artist(artist_id=tidal_resource.tidal_id)
            artist.get(
                session=session,
                audio_format=audio_format,
                out_dir=output_directory,
                include_eps_singles=include_eps_singles,
            )
            raise typer.Exit(code=0)
        elif isinstance(tidal_resource, TidalVideo):
            video = Video(video_id=tidal_resource.tidal_id)
            video.get(session=session, out_dir=output_directory)
            if loglevel == LogLevel.debug:
                video.dump()
            raise typer.Exit(code=0)
        elif isinstance(tidal_resource, TidalPlaylist):
```
all_code: the preview of this field is identical, character for character, to the cropped_code block above (the viewer truncates both fields at the same point), so it is not repeated here.
next_line:

```python
playlist = Playlist(playlist_id=tidal_resource.tidal_id)
```

- gold_snippet_index: 6
- created_at: 2023-12-12 21:50:25+00:00
- level: 24k
**Example 3**

- repo_name: `lbcb-sci/GNNome`
- file_path: `pipeline.py`

context (only the first entry is shown, truncated by the viewer):

```json
[ { "identifier": "train", "path": "train.py", "snippet": "def train(train_path, valid_path, out, assembler, overfit=False, dropout=None, seed=None, resume=False):\n hyperparameters = get_hyperparameters()\n if seed is None:\n seed = hyperparameters['seed']\n num_epochs = hyperparameters['...
```

import_statement:

```python
import argparse
import gzip
import os
import re
import pickle
import subprocess
import time

import torch
import requests

import graph_dataset
import evaluate
import train_valid_chrs
import hyperparameters

from datetime import datetime
from tqdm import tqdm
from Bio import SeqIO, AlignIO

from train import train
from inference import inference
```

- token_num: 14,681
cropped_code:

```python
def evaluate_genome(eval_path, assembler, model_path, asm_path, ref_path, genome, save_dir):
    real_path = os.path.join(eval_path, 'real')
    save_dir = os.path.join(asm_path, 'real', assembler, save_dir)
    print(f'New genome')
    chr_path = os.path.join(real_path, genome)
    save_path = os.path.join(save_dir, genome)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
        os.mkdir(os.path.join(save_path, f'assembly'))
        os.mkdir(os.path.join(save_path, f'decode'))
        os.mkdir(os.path.join(save_path, f'reports'))
    inference(chr_path, model_path, assembler, save_path)
    ref = ref_path
    idx = ref_path + '.fai'
    asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta')
    report = os.path.join(save_path, f'reports', '0_minigraph.txt')
    paf = os.path.join(save_path, f'asm.paf')
    p = evaluate.run_minigraph(ref, asm, paf)
    p.wait()
    p = evaluate.parse_pafs(idx, report, paf)
    p.wait()
    evaluate.parse_minigraph_for_full(save_dir)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', type=str, default=None, help='Output name for models')
    parser.add_argument('--overfit', action='store_true', default=False, help='Overfit on the chromosomes in the train directory')
    args = parser.parse_args()

    out = args.out
    overfit = args.overfit

    hyperparams = hyperparameters.get_hyperparameters()
    data_path = hyperparams['data_path']  # Location of the master database (storage)
    temp_path = hyperparams['temp_path']  # Location where the data will be temporarily stored for training
    eval_path = hyperparams['eval_path']  # Location where the synth and real evaluation data is stored
    refs_path = hyperparams['refs_path']  # Location where the references are stored - local because everything else can be generated from this
    asms_path = hyperparams['asms_path']  # Where the assemblies and other inference info will be stored
    assembler = hyperparams['assembler']  # Which assembler we are using, currently: raven/hifiasm
    models_path = hyperparams['models_path']
    threads = hyperparams['num_threads']
    # dataset = hyperparams['dataset']    # Which dataset we are using, currently it's only chm13
    data_path_ont = hyperparams['data_path_ont']
    eval_path_ont = hyperparams['eval_path_ont']
    initials = hyperparams['initials']

    time_start = datetime.now()
    if out is None:
        timestamp = time_start.strftime('%Y-%b-%d-%H-%M-%S')
        out = f'{timestamp}_{initials}'
    else:
        timestamp = time_start.strftime('%y-%m-%d')
        out = f'{timestamp}_{initials}_{out}'

    # Model name must start with the date when the model was trained, in the yy-mm-dd format
    # Following is the underscore and a name of the model
    # E.g., 22-10-31_modelA
    # All the info about the training (train/valid data, hyperparameters, etc.) should be stored in the logbook
    # You can also include them in the model name, but they NEED to be stored in the logbook!
    model_name = out

    # In the inference, model_name represents the model used for evaluation
    # All the inference data (predictions, walks, assemblies, and reports)
    # Will be stored in a directory with name {model_name}_{decoding}
    # Suffix should indicate info about the decoding
    strategy = hyperparams['strategy']
    B = hyperparams['B']
    num_decoding_paths = hyperparams['num_decoding_paths']
    if strategy == 'greedy':
        save_dir = f'{model_name}_Gx{num_decoding_paths}'
    elif strategy == 'beamsearch':
        save_dir = f'{model_name}_B{B}x{num_decoding_paths}'

    dicts = train_valid_chrs.get_config()
    train_dict = dicts['train_dict']
    valid_dict = dicts['valid_dict']
    test_dict = dicts['test_dict']
    train_dict_ont = dicts['train_dict_ont']
    valid_dict_ont = dicts['valid_dict_ont']
    test_dict_ont = {}

    specs = {
        'threads': threads,
        'filter': 0.99,
        'out': 'assembly.fasta',
        'assembler': assembler,
    }

    torch.set_num_threads(threads)
    model_path = os.path.join(models_path, f'model_{model_name}.pt')

    all_chr = merge_dicts(train_dict, valid_dict, test_dict)
    all_chr_ont = merge_dicts(train_dict_ont, valid_dict_ont)

    # file_structure_setup(data_path, refs_path)
    # download_reference(refs_path)
    # simulate_reads_hifi(data_path, refs_path, all_chr, assembler)
    # simulate_reads_combo(data_path, refs_path, all_chr, assembler)
    # generate_graphs_hifi(data_path, all_chr, assembler)
    # simulate_reads_ont(data_path_ont, refs_path, all_chr_ont, 'raven')
    # generate_graphs_ont(data_path_ont, all_chr_ont, 'raven')
    # exit(0)

    if overfit:
        train_path, valid_path, test_path = train_valid_split(data_path, eval_path, temp_path, assembler, train_dict, valid_dict, test_dict, out, overfit=True)
```
#### DEPRECATED #### def change_description(file_path): new_fasta = [] for record in SeqIO.parse(file_path, file_path[-5:]): # 'fasta' for FASTA file, 'fastq' for FASTQ file des = record.description.split(",") id = des[0][5:] if des[1] == "forward": strand = '+' else: strand = '-' position = des[2][9:].split("-") start = position[0] end = position[1] record.id = id record.description = f'strand={strand} start={start} end={end}' new_fasta.append(record) SeqIO.write(new_fasta, file_path, "fasta") def change_description2(fastq_path, maf_path, chr): chr = int(chr[3:]) reads = {r.id: r for r in SeqIO.parse(fastq_path, 'fastq')} # print(len(reads)) # counter = 0 for align in AlignIO.parse(maf_path, 'maf'): ref, read_m = align start = ref.annotations['start'] end = start + ref.annotations['size'] strand = '+' if read_m.annotations['strand'] == 1 else '-' description = f'strand={strand} start={start} end={end} chr={chr}' reads[read_m.id].id += f'_chr{chr}' reads[read_m.id].name += f'_chr{chr}' reads[read_m.id].description = description # counter += 1 # print(counter) fasta_path = fastq_path[:-1] + 'a' SeqIO.write(list(reads.values()), fasta_path, 'fasta') os.remove(fastq_path) return fasta_path def create_chr_dirs(pth): for i in range(1, 24): if i == 23: i = 'X' subprocess.run(f'mkdir chr{i}', shell=True, cwd=pth) subprocess.run(f'mkdir raw raven hifiasm', shell=True, cwd=os.path.join(pth, f'chr{i}')) subprocess.run(f'mkdir processed info output graphia', shell=True, cwd=os.path.join(pth, f'chr{i}/raven')) subprocess.run(f'mkdir processed info output graphia', shell=True, cwd=os.path.join(pth, f'chr{i}/hifiasm')) def merge_dicts(d1, d2, d3={}): keys = {*d1, *d2, *d3} merged = {key: d1.get(key, 0) + d2.get(key, 0) + d3.get(key, 0) for key in keys} return merged # -1. Set up the data file structure def file_structure_setup(data_path, refs_path): # TODO: Do something with this! return print(f'SETUP::filesystem:: Create directories for storing data') if not os.path.isdir(data_path): os.makedirs(data_path) if 'CHM13' not in os.listdir(refs_path): os.mkdir(os.path.join(refs_path, 'CHM13')) if 'chromosomes' not in os.listdir(refs_path): os.mkdir(os.path.join(refs_path, 'chromosomes')) if 'simulated_hifi' not in os.listdir(data_path): os.mkdir(os.path.join(data_path, 'simulated_hifi')) create_chr_dirs(os.path.join(data_path, 'simulated_hifi')) if 'simulated_ont' not in os.listdir(data_path): os.mkdir(os.path.join(data_path, 'simulated_ont')) create_chr_dirs(os.path.join(data_path, 'simulated_ont')) # if 'real' not in os.listdir(data_path): # subprocess.run(f'bash download_dataset.sh {data_path}', shell=True) # os.mkdir(os.path.join(data_path, 'real')) # create_chr_dirs(os.path.join(data_path, 'real')) if 'experiments' not in os.listdir(data_path): os.mkdir(os.path.join(data_path, 'experiments')) # 0. Download the CHM13 if necessary def download_reference(refs_path): chm_path = os.path.join(refs_path, 'CHM13') chr_path = os.path.join(refs_path, 'chromosomes') chm13_url = 'https://s3-us-west-2.amazonaws.com/human-pangenomics/T2T/CHM13/assemblies/chm13.draft_v1.1.fasta.gz' chm13_path = os.path.join(chm_path, 'chm13.draft_v1.1.fasta.gz') if len(os.listdir(chm_path)) == 0: # Download the CHM13 reference # Code for tqdm from: https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests print(f'SETUP::download:: CHM13 not found! 
Downloading...') response = requests.get(chm13_url, stream=True) total_size_in_bytes= int(response.headers.get('content-length', 0)) block_size = 1024 #1 Kibibyte progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) with open(chm13_path, 'wb') as file: for data in response.iter_content(block_size): progress_bar.update(len(data)) file.write(data) progress_bar.close() if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes: print("ERROR, something went wrong") if len(os.listdir(chr_path)) == 0: # Parse the CHM13 into individual chromosomes print(f'SETUP::download:: Split CHM13 per chromosome') with gzip.open(chm13_path, 'rt') as f: for record in SeqIO.parse(f, 'fasta'): SeqIO.write(record, os.path.join(chr_path, f'{record.id}.fasta'), 'fasta') def handle_pbsim_output(idx, chrN, chr_raw_path, combo=False): if combo == True: idx = chrN subprocess.run(f'mv {idx}_0001.fastq {idx}.fastq', shell=True, cwd=chr_raw_path) subprocess.run(f'mv {idx}_0001.maf {idx}.maf', shell=True, cwd=chr_raw_path) subprocess.run(f'rm {idx}_0001.ref', shell=True, cwd=chr_raw_path) fastq_path = os.path.join(chr_raw_path, f'{idx}.fastq') maf_path = os.path.join(chr_raw_path, f'{idx}.maf') print(f'Adding positions for training...') fasta_path = change_description2(fastq_path, maf_path, chr=chrN) # Extract positional info from the MAF file print(f'Removing the MAF file...') subprocess.run(f'rm {idx}.maf', shell=True, cwd=chr_raw_path) if combo: return fasta_path else: return None # 1. Simulate the sequences - HiFi def simulate_reads_hifi(data_path, refs_path, chr_dict, assembler): print(f'SETUP::simulate') if 'vendor' not in os.listdir(): os.mkdir('vendor') pbsim3_dir = f'/home/vrcekl/pbsim3' data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if chrN_flag.endswith('_r'): continue if '+' in chrN_flag: continue # elif chrN_flag.endswith('_chm13'): # chrN = chrN_flag[:-6] # chr_path = os.path.join(refs_path, 'CHM13', 'chromosomes') # pbsim_path = os.path.join(data_path, 'chm13_pbsim3') # chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') # sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' # sample_profile_id = f'chm13' # depth = 60 # elif chrN_flag.endswith('_ncbr'): # chrN = chrN_flag[:-5] # chr_path = os.path.join(refs_path, 'ncoibor', 'chromosomes') # pbsim_path = os.path.join(data_path, 'ncoibor_pbsim3') # chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') # sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' # sample_profile_id = f'chm13' # depth = 60 elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_path = os.path.join(refs_path, f'HG002', 'hg002_chromosomes') # TODO: redefine refs path pbsim_path = os.path.join(data_path, 'hg002_pbsim3') # TODO: redefine data path chr_seq_path = os.path.join(chr_path, f'{chrN}_MATERNAL.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/HG002/20kb/m64011_190830_220126.sub.fastq' # TODO: Need to provide this as an argument sample_profile_id = f'20kb-m64011_190830_220126' # TODO: Need to provide this as an argument depth = 60 else: print('Give valid suffix!') raise Exception chr_raw_path = os.path.join(pbsim_path, f'{chrN}/raw') chr_processed_path = os.path.join(pbsim_path, f'{chrN}/{assembler}/processed') if not os.path.isdir(chr_raw_path): os.makedirs(chr_raw_path) if not os.path.isdir(chr_processed_path): os.makedirs(chr_processed_path) # TODO: Fix so that you can delete raw 
files raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(chr_raw_path)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(chr_processed_path)} all_files = raw_files | prc_files n_have = max(all_files) + 1 if all_files else 0 if n_need <= n_have: continue n_diff = n_need - n_have print(f'SETUP::simulate:: Simulate {n_diff} datasets for {chrN_flag} with PBSIM3') for i in range(n_diff): idx = n_have + i chr_save_path = os.path.join(chr_raw_path, f'{idx}.fasta') print(f'\nStep {i}: Simulating reads {chr_save_path}') # Use the CHM13/HG002 profile for all the chromosomes if f'sample_profile_{sample_profile_id}.fastq' not in os.listdir(pbsim3_dir): subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample {sample_file_path} ' f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) else: subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) handle_pbsim_output(idx, chrN, chr_raw_path) def simulate_reads_combo(data_path, refs_path, chr_dict, assembler): data_path = os.path.abspath(data_path) pbsim_path = os.path.join(data_path, 'combo') pbsim3_dir = hyperparameters.get_hyperparameters()['pbsim3_dir'] if len(pbsim3_dir) == 0: pbsim3_dir = 'pbsim3' for chrN_combo, n_need in chr_dict.items(): if '+' not in chrN_combo: continue chromosomes = chrN_combo.split('+') # chr1_chm13+chr2_chm13+chr3_chm13 chr_raw_path = os.path.join(pbsim_path, f'{chrN_combo}/raw') if not os.path.isdir(chr_raw_path): os.makedirs(chr_raw_path) chr_processed_path = os.path.join(pbsim_path, f'{chrN_combo}/{assembler}/processed') # TODO: Fix so that you can delete raw files if not os.path.isdir(chr_processed_path): os.makedirs(chr_processed_path) raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(chr_raw_path)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(chr_processed_path)} all_files = raw_files | prc_files if all_files: n_have = max(all_files) + 1 else: n_have = 0 if n_need <= n_have: continue else: n_diff = n_need - n_have print(f'SETUP::simulate:: Simulate {n_diff} datasets for {chrN_combo} with PBSIM3') # Simulate reads for chrN_combo n_diff times for i in range(n_diff): idx = n_have + i all_reads = [] for chromosome in chromosomes: if chromosome.endswith('_chm13'): chrN = chromosome[:-6] chr_path = os.path.join(refs_path, 'CHM13', 'chromosomes') chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') chr_save_path = os.path.join(chr_raw_path, f'{chrN}.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' sample_profile_id = f'chm13' depth = 30 elif chromosome.endswith('_ncbr'): chrN = chromosome[:-5] chr_path = os.path.join(refs_path, 'ncoibor', 'chromosomes') chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') chr_save_path = os.path.join(chr_raw_path, f'{chrN}.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' sample_profile_id = f'chm13' depth = 30 elif chromosome.endswith('_hg002'): chrN = chromosome[:-6] chr_path = os.path.join(refs_path, 'HG002', 'hg002_chromosomes') chr_seq_path = os.path.join(chr_path, f'{chrN}_MATERNAL.fasta') chr_save_path = os.path.join(chr_raw_path, f'{chrN}.fasta') sample_file_path = 
f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/HG002/20kb/m64011_190830_220126.sub.fastq' sample_profile_id = f'20kb-m64011_190830_220126' depth = 30 print(f'\nStep {i}: Simulating reads {chr_save_path}') if f'sample_profile_{sample_profile_id}.fastq' not in os.listdir(pbsim3_dir): subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample {sample_file_path} ' f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{chrN}', shell=True, cwd=pbsim3_dir) else: subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{chrN}', shell=True, cwd=pbsim3_dir) fasta_path = handle_pbsim_output(idx, chrN, chr_raw_path, combo=True) # Because it's combo we pass chrN instead of idx! We get idx.fasta later # Combining individual chromosome FASTAs into a unified list print(f'Appending the list of all the reads with {chromosome}...', end='\t') all_reads.extend(list(SeqIO.parse(fasta_path, 'fasta'))) subprocess.run(f'rm {fasta_path}', shell=True) print(f'Done!') # Saving the unified FASTA file as idx.fasta all_reads_path = os.path.join(chr_raw_path, f'{idx}.fasta') SeqIO.write(all_reads, all_reads_path, 'fasta') def simulate_reads_ont(data_path, refs_path, chr_dict, assembler='raven'): print(f'SETUP::simulate') if 'vendor' not in os.listdir(): os.mkdir('vendor') pbsim3_dir = f'/home/vrcekl/pbsim3' data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if chrN_flag.endswith('_r'): # Training on real reads continue if '+' in chrN_flag: # Training on combined synthetic chromosomes continue elif chrN_flag.endswith('_ncbr'): # Training on synthetic ncoibor chromosomes continue elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] # chrN_chm13 chr_path = os.path.join(refs_path, 'CHM13', 'chromosomes') pbsim_path = os.path.join(data_path, 'chm13_pbsim3') chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data-DELETE/CHM13/ONT/chm13_ont-subsampled_2M_trimmed.fastq' sample_profile_id = f'chm13-ont' depth = 120 elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] # chrN_hg002 chr_path = os.path.join(refs_path, 'HG002', 'hg002_chromosomes') pbsim_path = os.path.join(data_path, 'hg002_pbsim3') chr_seq_path = os.path.join(chr_path, f'{chrN}_MATERNAL.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data-DELETE/CHM13/ONT/chm13_ont-subsampled_2M_trimmed.fastq' sample_profile_id = f'chm13-ont' depth = 120 else: print(f'Chromosome suffix incorrect!') raise Exception chr_raw_path = os.path.join(pbsim_path, f'{chrN}/raw') chr_processed_path = os.path.join(pbsim_path, f'{chrN}/{assembler}/processed') if not os.path.isdir(chr_raw_path): os.makedirs(chr_raw_path) if not os.path.isdir(chr_processed_path): os.makedirs(chr_processed_path) # TODO: Fix so that you can delete raw files raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(chr_raw_path)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(chr_processed_path)} all_files = raw_files | prc_files n_have = max(all_files) + 1 if all_files else 0 if n_need <= n_have: continue n_diff = n_need - n_have print(f'SETUP::simulate:: Simulate {n_diff} datasets for {chrN} with PBSIM3') for i in range(n_diff): idx = n_have + i chr_save_path = os.path.join(chr_raw_path, f'{idx}.fasta') print(f'\nStep {i}: Simulating reads {chr_save_path}') if 
f'sample_profile_{sample_profile_id}.fastq' in os.listdir(pbsim3_dir): # Use the CHM13 profile for all the chromosomes subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) else: subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample {sample_file_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) handle_pbsim_output(idx, chrN, chr_raw_path) # 2. Generate the graphs def generate_graphs_hifi(data_path, chr_dict, assembler): print(f'SETUP::generate') if 'raven' not in os.listdir('vendor'): print(f'SETUP::generate:: Download Raven') subprocess.run(f'git clone -b print_graphs https://github.com/lbcb-sci/raven', shell=True, cwd='vendor') subprocess.run(f'cmake -S ./ -B./build -DRAVEN_BUILD_EXE=1 -DCMAKE_BUILD_TYPE=Release', shell=True, cwd='vendor/raven') subprocess.run(f'cmake --build build', shell=True, cwd='vendor/raven') data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if '+' in chrN_flag: chrN = chrN_flag # Called chrN_combo in simulate_reads_combo function chr_sim_path = os.path.join(data_path, 'combo', f'{chrN}') elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(data_path, 'real', f'{chrN}') elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'chm13_pbsim3', f'{chrN}') elif chrN_flag.endswith('_ncbr'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(data_path, 'ncoibor_pbsim3', f'{chrN}') elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'hg002_pbsim3', f'{chrN}') else: print(f'Give valid suffix') raise Exception chr_raw_path = os.path.join(chr_sim_path, 'raw') chr_prc_path = os.path.join(chr_sim_path, f'{assembler}/processed') n_raw = len(os.listdir(chr_raw_path)) n_prc = len(os.listdir(chr_prc_path)) n_diff = max(0, n_raw - n_prc) print(f'SETUP::generate:: Generate {n_diff} graphs for {chrN}') specs = { 'threads': 32, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': assembler, } graph_dataset.AssemblyGraphDataset_HiFi(chr_sim_path, nb_pos_enc=None, assembler=assembler, specs=specs, generate=True) def generate_graphs_ont(data_path, chr_dict, assembler): print(f'SETUP::generate') if 'raven' not in os.listdir('vendor'): print(f'SETUP::generate:: Download Raven') subprocess.run(f'git clone -b print_graphs https://github.com/lbcb-sci/raven', shell=True, cwd='vendor') subprocess.run(f'cmake -S ./ -B./build -DRAVEN_BUILD_EXE=1 -DCMAKE_BUILD_TYPE=Release', shell=True, cwd='vendor/raven') subprocess.run(f'cmake --build build', shell=True, cwd='vendor/raven') data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if '+' in chrN_flag: chrN = chrN_flag # Called chrN_combo in simulate_reads_combo function chr_sim_path = os.path.join(data_path, 'combo', f'{chrN}') elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(data_path, 'real', f'{chrN}') elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'chm13_pbsim3', f'{chrN}') elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'hg002_pbsim3', f'{chrN}') else: print(f'Chromosome suffix incorrect!') raise Exception chr_raw_path = os.path.join(chr_sim_path, 'raw') chr_prc_path = 
os.path.join(chr_sim_path, f'{assembler}/processed') n_raw = len(os.listdir(chr_raw_path)) n_prc = len(os.listdir(chr_prc_path)) n_diff = max(0, n_raw - n_prc) print(f'SETUP::generate:: Generate {n_diff} graphs for {chrN}') specs = { 'threads': 32, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': 'raven', } graph_dataset.AssemblyGraphDataset_ONT(chr_sim_path, nb_pos_enc=None, assembler='raven', specs=specs, generate=True) # 2.5 Train-valid-test split def train_valid_split(data_path, eval_path, temp_path, assembler, train_dict, valid_dict, test_dict={}, out=None, overfit=False): print(f'SETUP::split') data_path = os.path.abspath(data_path) eval_path = os.path.abspath(eval_path) if overfit: data_path = eval_path real_path = os.path.join(eval_path, 'real') # pbsim_path = os.path.join(data_path, 'pbsim3') ncoibor_path = os.path.join(data_path, 'ncoibor_pbsim3') hg002_path = os.path.join(data_path, 'hg002_pbsim3') combo_path = os.path.join(data_path, 'combo') chm13_path = os.path.join(data_path, 'chm13_pbsim3') arab_path = os.path.join(data_path, 'arabidopsis_pbsim3') zmays_path = os.path.join(data_path, 'zmays_Mo17_pbsim3') exp_path = temp_path if out is None: train_path = os.path.join(exp_path, f'train', assembler) valid_path = os.path.join(exp_path, f'valid', assembler) test_path = os.path.join(exp_path, f'test', assembler) else: train_path = os.path.join(exp_path, f'train_{out}', assembler) valid_path = os.path.join(exp_path, f'valid_{out}', assembler) test_path = os.path.join(exp_path, f'test_{out}', assembler) if not os.path.isdir(train_path): os.makedirs(train_path) subprocess.run(f'mkdir processed info', shell=True, cwd=train_path) if not os.path.isdir(valid_path): os.makedirs(valid_path) subprocess.run(f'mkdir processed info', shell=True, cwd=valid_path) if not os.path.isdir(test_path) and len(test_dict) > 0: os.makedirs(test_path) subprocess.run(f'mkdir processed info', shell=True, cwd=test_path) train_g_to_chr = {} # Remember chromosomes for each graph in the dataset train_g_to_org_g = {} # Remember index of the graph in the master dataset for each graph in this dataset n_have = 0 if assembler == 'both': assemblers = ['hifiasm', 'raven'] else: assemblers = [assembler] for assembler in assemblers: for chrN_flag, n_need in train_dict.items(): # copy n_need datasets from chrN into train dict if '_r' in chrN_flag and n_need > 1: print(f'SETUP::split::WARNING Cannot copy more than one graph for real data: {chrN_flag}') n_need = 1 print(f'SETUP::split:: Copying {n_need} graphs of {chrN_flag} - {assembler} into {train_path}') for i in range(n_need): if '+' in chrN_flag: chrN = chrN_flag chr_sim_path = os.path.join(combo_path, chrN, assembler) elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(real_path, 'chm13_chromosomes', chrN, assembler) # elif chrN_flag.endswith('_pbs'): # chrN = chrN_flag[:-4] # chr_sim_path = os.path.join(pbsim_path, chrN, assembler) elif chrN_flag.endswith('_ncbr'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(ncoibor_path, chrN, assembler) elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(hg002_path, chrN, assembler) elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(chm13_path, chrN, assembler) elif chrN_flag.endswith('_arab'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(arab_path, chrN, assembler) elif chrN_flag.endswith('_zmays'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(zmays_path, chrN, assembler) else: print(f'Give proper suffix!') raise 
Exception train_g_to_chr[n_have] = chrN print(f'Copying {chr_sim_path}/processed/{i}.dgl into {train_path}/processed/{n_have}.dgl') subprocess.run(f'cp {chr_sim_path}/processed/{i}.dgl {train_path}/processed/{n_have}.dgl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_succ.pkl {train_path}/info/{n_have}_succ.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_pred.pkl {train_path}/info/{n_have}_pred.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_edges.pkl {train_path}/info/{n_have}_edges.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_reads.pkl {train_path}/info/{n_have}_reads.pkl', shell=True) train_g_to_org_g[n_have] = i n_have += 1 pickle.dump(train_g_to_chr, open(f'{train_path}/info/g_to_chr.pkl', 'wb')) pickle.dump(train_g_to_org_g, open(f'{train_path}/info/g_to_org_g.pkl', 'wb')) valid_g_to_chr = {} valid_g_to_org_g = {} n_have = 0 for assembler in assemblers: for chrN_flag, n_need in valid_dict.items(): # copy n_need datasets from chrN into train dict if '_r' in chrN_flag and n_need > 1: print(f'SETUP::split::WARNING Cannot copy more than one graph for real data: {chrN_flag}') n_need = 1 print(f'SETUP::split:: Copying {n_need} graphs of {chrN_flag} - {assembler} into {valid_path}') for i in range(n_need): if '+' in chrN_flag: chrN = chrN_flag chr_sim_path = os.path.join(combo_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(real_path, 'chm13_chromosomes', chrN, assembler) j = 0 # elif chrN_flag.endswith('_pbs'): # chrN = chrN_flag[:-4] # chr_sim_path = os.path.join(pbsim_path, chrN, assembler) # j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_ncbr'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(ncoibor_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(hg002_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(chm13_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_arab'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(arab_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_zmays'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(zmays_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) else: print(f'Give proper suffix!') raise Exception valid_g_to_chr[n_have] = chrN print(f'Copying {chr_sim_path}/processed/{j}.dgl into {valid_path}/processed/{n_have}.dgl') subprocess.run(f'cp {chr_sim_path}/processed/{j}.dgl {valid_path}/processed/{n_have}.dgl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_succ.pkl {valid_path}/info/{n_have}_succ.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_pred.pkl {valid_path}/info/{n_have}_pred.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_edges.pkl {valid_path}/info/{n_have}_edges.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_reads.pkl {valid_path}/info/{n_have}_reads.pkl', shell=True) valid_g_to_org_g[n_have] = j n_have += 1 pickle.dump(valid_g_to_chr, open(f'{valid_path}/info/g_to_chr.pkl', 'wb')) pickle.dump(valid_g_to_org_g, open(f'{valid_path}/info/g_to_org_g.pkl', 'wb')) # TODO: FIX THIS !!!!!!!!!!!!!!!!!! 
train_path = os.path.join(train_path, os.path.pardir) valid_path = os.path.join(valid_path, os.path.pardir) test_path = os.path.join(test_path, os.path.pardir) ################################### return train_path, valid_path, test_path # def predict_baselines(test_path, assembler, out, model_path=None, device='cpu'): # if model_path is None: # model_path = os.path.abspath(f'pretrained/model_{out}.pt') # walks_and_contigs = inference_baselines(test_path, model_path, assembler, device) # walks_per_graph, contigs_per_graph = walks_and_contigs[0], walks_and_contigs[1] # walks_per_graph_ol_len, contigs_per_graph_ol_len = walks_and_contigs[2], walks_and_contigs[3] # walks_per_graph_ol_sim, contigs_per_graph_ol_sim = walks_and_contigs[4], walks_and_contigs[5] # g_to_chr = pickle.load(open(f'{test_path}/info/g_to_chr.pkl', 'rb')) # for idx, (contigs, contigs_ol_len, contigs_ol_sim) in enumerate(zip(contigs_per_graph, contigs_per_graph_ol_len, contigs_per_graph_ol_sim)): # chrN = g_to_chr[idx] # print(f'GNN: Scores') # num_contigs, longest_contig, reconstructed, n50, ng50 = evaluate.quick_evaluation(contigs, chrN) # evaluate.print_summary(test_path, idx, chrN, num_contigs, longest_contig, reconstructed, n50, ng50) # print(f'Baseline: Overlap lengths') # num_contigs, longest_contig, reconstructed, n50, ng50 = evaluate.quick_evaluation(contigs_ol_len, chrN) # evaluate.print_summary(test_path, idx, chrN, num_contigs, longest_contig, reconstructed, n50, ng50) # print(f'Baseline: Overlap similarities') # num_contigs, longest_contig, reconstructed, n50, ng50 = evaluate.quick_evaluation(contigs_ol_sim, chrN) # evaluate.print_summary(test_path, idx, chrN, num_contigs, longest_contig, reconstructed, n50, ng50) def cleanup(train_path, valid_path): subprocess.run(f'rm -rf {train_path}', shell=True) subprocess.run(f'rm -rf {valid_path}', shell=True) def evaluate_real(eval_path, assembler, model_path, asm_path, ref_path, save_dir): real_path = os.path.join(eval_path, 'real') save_dir = os.path.join(asm_path, 'real', assembler, save_dir) procs = [] for i in range(1, 24): if i == 23: i = 'X' print(f'\nChromosome {i}') chr_path = os.path.join(real_path, f'chr{i}') save_path = os.path.join(save_dir, f'chr{i}') if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = os.path.join(ref_path, 'CHM13', 'chromosomes', f'chr{i}.fasta') idx = os.path.join(ref_path, 'CHM13', 'chromosomes', 'indexed', f'chr{i}.fasta.fai') asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) procs.append(p) for p in procs: p.wait() procs = [] for i in range(1, 24): if i == 23: i = 'X' save_path = os.path.join(save_dir, f'chr{i}') idx = os.path.join(ref_path, 'indexed', f'chr{i}.fasta.fai') paf = os.path.join(save_path, f'asm.paf') report = os.path.join(save_path, f'reports', '0_minigraph.txt') p = evaluate.parse_pafs(idx, report, paf) procs.append(p) for p in procs: p.wait() evaluate.parse_minigraph_for_chrs(save_dir) def evaluate_synth(eval_path, assembler, model_path, asm_path, ref_path, save_dir): synth_path = os.path.join(eval_path, 'synth') save_dir = os.path.join(asm_path, 'synth', assembler, save_dir) procs = [] for i in range(1, 24): if i == 23: i = 'X' print(f'\nChromosome {i}') 
chr_path = os.path.join(synth_path, f'chr{i}') save_path = os.path.join(save_dir, f'chr{i}') if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = os.path.join(ref_path, 'CHM13', 'chromosomes', f'chr{i}.fasta') idx = os.path.join(ref_path, 'CHM13', 'chromosomes', 'indexed', f'chr{i}.fasta.fai') asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) procs.append(p) for p in procs: p.wait() procs = [] for i in range(1, 24): if i == 23: i = 'X' save_path = os.path.join(save_dir, f'chr{i}') idx = os.path.join(ref_path, 'indexed', f'chr{i}.fasta.fai') paf = os.path.join(save_path, f'asm.paf') report = os.path.join(save_path, f'reports', '0_minigraph.txt') p = evaluate.parse_pafs(idx, report, paf) procs.append(p) for p in procs: p.wait() evaluate.parse_minigraph_for_chrs(save_dir) def evaluate_genome(eval_path, assembler, model_path, asm_path, ref_path, genome, save_dir): real_path = os.path.join(eval_path, 'real') save_dir = os.path.join(asm_path, 'real', assembler, save_dir) print(f'New genome') chr_path = os.path.join(real_path, genome) save_path = os.path.join(save_dir, genome) if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = ref_path idx = ref_path + '.fai' asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) p.wait() p = evaluate.parse_pafs(idx, report, paf) p.wait() evaluate.parse_minigraph_for_full(save_dir) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--out', type=str, default=None, help='Output name for models') parser.add_argument('--overfit', action='store_true', default=False, help='Overfit on the chromosomes in the train directory') args = parser.parse_args() out = args.out overfit = args.overfit hyperparams = hyperparameters.get_hyperparameters() data_path = hyperparams['data_path'] # Location of the master database (storage) temp_path = hyperparams['temp_path'] # Location where the data will be temporarily stored for training eval_path = hyperparams['eval_path'] # Location where the synth and real evaluation data is stored refs_path = hyperparams['refs_path'] # Location where the references are stored - local because everythin else can be generated from this asms_path = hyperparams['asms_path'] # Where the assemblies and other inference info will be stored assembler = hyperparams['assembler'] # Which assembler we are using, currently: raven/hifiasm models_path = hyperparams['models_path'] threads = hyperparams['num_threads'] # dataset = hyperparams['dataset'] # Which dataset we are using, currently it's only chm13 data_path_ont = hyperparams['data_path_ont'] eval_path_ont = hyperparams['eval_path_ont'] initials = hyperparams['initials'] time_start = datetime.now() if out is None: timestamp = time_start.strftime('%Y-%b-%d-%H-%M-%S') out = f'{timestamp}_{initials}' else: timestamp = time_start.strftime('%y-%m-%d') out = 
f'{timestamp}_{initials}_{out}' # Model name must start with the date when the model was trained, in the yy-mm-dd format # Followed by an underscore and the name of the model # E.g., 22-10-31_modelA # All the info about the training (train/valid data, hyperparameters, etc.) should be stored in the logbook # You can also include them in the model name, but they NEED to be stored in the logbook! model_name = out # In inference, model_name represents the model used for evaluation # All the inference data (predictions, walks, assemblies, and reports) # Will be stored in a directory with name {model_name}_{decoding} # Suffix should indicate info about the decoding strategy = hyperparams['strategy'] B = hyperparams['B'] num_decoding_paths = hyperparams['num_decoding_paths'] if strategy == 'greedy': save_dir = f'{model_name}_Gx{num_decoding_paths}' elif strategy == 'beamsearch': save_dir = f'{model_name}_B{B}x{num_decoding_paths}' dicts = train_valid_chrs.get_config() train_dict = dicts['train_dict'] valid_dict = dicts['valid_dict'] test_dict = dicts['test_dict'] train_dict_ont = dicts['train_dict_ont'] valid_dict_ont = dicts['valid_dict_ont'] test_dict_ont = {} specs = { 'threads': threads, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': assembler, } torch.set_num_threads(threads) model_path = os.path.join(models_path, f'model_{model_name}.pt') all_chr = merge_dicts(train_dict, valid_dict, test_dict) all_chr_ont = merge_dicts(train_dict_ont, valid_dict_ont) # file_structure_setup(data_path, refs_path) # download_reference(refs_path) # simulate_reads_hifi(data_path, refs_path, all_chr, assembler) # simulate_reads_combo(data_path, refs_path, all_chr, assembler) # generate_graphs_hifi(data_path, all_chr, assembler) # simulate_reads_ont(data_path_ont, refs_path, all_chr_ont, 'raven') # generate_graphs_ont(data_path_ont, all_chr_ont, 'raven') # exit(0) if overfit: train_path, valid_path, test_path = train_valid_split(data_path, eval_path, temp_path, assembler, train_dict, valid_dict, test_dict, out, overfit=True)
train(train_path, valid_path, out, assembler, overfit)
0
2023-12-08 04:45:45+00:00
24k
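The model-naming and decoding-directory conventions spelled out in the comments above can be condensed into a small self-contained sketch. This is illustrative only: the initials, run name, and decoding hyperparameters below are invented placeholders, not values from the repository.

from datetime import datetime

# Sketch of the naming convention described in the comments above.
# 'AB' and 'modelA' are hypothetical stand-ins for hyperparams['initials'] and --out.
initials = 'AB'
out = 'modelA'
timestamp = datetime.now().strftime('%y-%m-%d')
model_name = f'{timestamp}_{initials}_{out}'           # e.g. '22-10-31_AB_modelA'

strategy, B, num_decoding_paths = 'beamsearch', 25, 8  # assumed decoding hyperparameters
if strategy == 'greedy':
    save_dir = f'{model_name}_Gx{num_decoding_paths}'
elif strategy == 'beamsearch':
    save_dir = f'{model_name}_B{B}x{num_decoding_paths}'
print(save_dir)                                        # e.g. '22-10-31_AB_modelA_B25x8'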
ZS-YANG/FemtoDet-v3
mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
[ { "identifier": "BBoxHead", "path": "mmdet/models/roi_heads/bbox_heads/bbox_head.py", "snippet": "class BBoxHead(BaseModule):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively.\"\"\"\n\n def __init__(self,\n with_avg_pool: bool =...
from typing import List, Optional, Tuple, Union from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor, nn from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import empty_instances from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps import numpy as np import torch import torch.nn.functional as F
15,729
bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas. has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. # It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). 
""" bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
# Copyright (c) OpenMMLab. All rights reserved. @MODELS.register_module() class MultiInstanceBBoxHead(BBoxHead): r"""Bbox head used in CrowdDet. .. code-block:: none /-> cls convs_1 -> cls fcs_1 -> cls_1 |-- | \-> reg convs_1 -> reg fcs_1 -> reg_1 | | /-> cls convs_2 -> cls fcs_2 -> cls_2 shared convs -> shared fcs |-- | \-> reg convs_2 -> reg fcs_2 -> reg_2 | | ... | | /-> cls convs_k -> cls fcs_k -> cls_k |-- \-> reg convs_k -> reg fcs_k -> reg_k Args: num_instance (int): The number of branches after shared fcs. Defaults to 2. with_refine (bool): Whether to use refine module. Defaults to False. num_shared_convs (int): The number of shared convs. Defaults to 0. num_shared_fcs (int): The number of shared fcs. Defaults to 2. num_cls_convs (int): The number of cls convs. Defaults to 0. num_cls_fcs (int): The number of cls fcs. Defaults to 0. num_reg_convs (int): The number of reg convs. Defaults to 0. num_reg_fcs (int): The number of reg fcs. Defaults to 0. conv_out_channels (int): The number of conv out channels. Defaults to 256. fc_out_channels (int): The number of fc out channels. Defaults to 1024. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ # noqa: W605 def __init__(self, num_instance: int = 2, with_refine: bool = False, num_shared_convs: int = 0, num_shared_fcs: int = 2, num_cls_convs: int = 0, num_cls_fcs: int = 0, num_reg_convs: int = 0, num_reg_fcs: int = 0, conv_out_channels: int = 256, fc_out_channels: int = 1024, init_cfg: Optional[Union[dict, ConfigDict]] = None, *args, **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) assert num_instance == 2, 'Currently only 2 instances are supported' if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_instance = num_instance self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.with_refine = with_refine # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim self.relu = nn.ReLU(inplace=True) if self.with_refine: refine_model_cfg = { 'type': 'Linear', 'in_features': self.shared_out_channels + 20, 'out_features': self.shared_out_channels } self.shared_fcs_ref = MODELS.build(refine_model_cfg) self.fc_cls_ref = nn.ModuleList() self.fc_reg_ref = nn.ModuleList() self.cls_convs = nn.ModuleList() self.cls_fcs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.reg_fcs = nn.ModuleList() self.cls_last_dim = list() self.reg_last_dim = list() self.fc_cls = nn.ModuleList() self.fc_reg = nn.ModuleList() for k in range(self.num_instance): # add cls specific branch cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) self.cls_convs.append(cls_convs) self.cls_fcs.append(cls_fcs) self.cls_last_dim.append(cls_last_dim) # add reg specific branch reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, 
self.shared_out_channels) self.reg_convs.append(reg_convs) self.reg_fcs.append(reg_fcs) self.reg_last_dim.append(reg_last_dim) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area if self.with_cls: if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels( self.num_classes) else: cls_channels = self.num_classes + 1 cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy cls_predictor_cfg_.update( in_features=self.cls_last_dim[k], out_features=cls_channels) self.fc_cls.append(MODELS.build(cls_predictor_cfg_)) if self.with_refine: self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_)) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) reg_predictor_cfg_ = self.reg_predictor_cfg.copy() reg_predictor_cfg_.update( in_features=self.reg_last_dim[k], out_features=out_dim_reg) self.fc_reg.append(MODELS.build(reg_predictor_cfg_)) if self.with_refine: self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_)) if init_cfg is None: # when init_cfg is None, # It has been set to # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] # after `super(ConvFCBBoxHead, self).__init__()` # we only need to append additional configuration # for `shared_fcs`, `cls_fcs` and `reg_fcs` self.init_cfg += [ dict( type='Xavier', distribution='uniform', override=[ dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs') ]) ] def _add_conv_fc_branch(self, num_branch_convs: int, num_branch_fcs: int, in_channels: int, is_shared: bool = False) -> tuple: """Add shared or separable branch. convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. - cls_score (Tensor): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - cls_score_ref (Tensor): The cls_score after refine model. - bbox_pred_ref (Tensor): The bbox_pred after refine model. 
""" # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) x_cls = x x_reg = x # separate branches cls_score = list() bbox_pred = list() for k in range(self.num_instance): for conv in self.cls_convs[k]: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs[k]: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs[k]: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs[k]: x_reg = self.relu(fc(x_reg)) cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None) bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None) if self.with_refine: x_ref = x cls_score_ref = list() bbox_pred_ref = list() for k in range(self.num_instance): feat_ref = cls_score[k].softmax(dim=-1) feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]), dim=1).repeat(1, 4) feat_ref = torch.cat((x_ref, feat_ref), dim=1) feat_ref = F.relu_(self.shared_fcs_ref(feat_ref)) cls_score_ref.append(self.fc_cls_ref[k](feat_ref)) bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref)) cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) cls_score_ref = torch.cat(cls_score_ref, dim=1) bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1) return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) return cls_score, bbox_pred def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_targets_single` function. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). 
""" labels = [] bbox_targets = [] bbox_weights = [] label_weights = [] for i in range(len(sampling_results)): sample_bboxes = torch.cat([ sampling_results[i].pos_gt_bboxes, sampling_results[i].neg_gt_bboxes ]) sample_priors = sampling_results[i].priors sample_priors = sample_priors.repeat(1, self.num_instance).reshape( -1, 4) sample_bboxes = sample_bboxes.reshape(-1, 4) if not self.reg_decoded_bbox: _bbox_targets = self.bbox_coder.encode(sample_priors, sample_bboxes) else: _bbox_targets = sample_priors _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4) _bbox_weights = torch.ones(_bbox_targets.shape) _labels = torch.cat([ sampling_results[i].pos_gt_labels, sampling_results[i].neg_gt_labels ]) _labels_weights = torch.ones(_labels.shape) bbox_targets.append(_bbox_targets) bbox_weights.append(_bbox_weights) labels.append(_labels) label_weights.append(_labels_weights) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, **kwargs) -> dict: """Calculate the loss based on the network predictions and targets. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, (num_classes + 1) * k), k represents the number of prediction boxes generated by each proposal box. bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). label_weights (Tensor): Labels_weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). bbox_targets (Tensor): Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k). Returns: dict: A dictionary of loss. """ losses = dict() if bbox_pred.numel(): loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_targets, labels) loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_targets, labels) loss = torch.cat([loss_0, loss_1], dim=1) _, min_indices = loss.min(dim=1) loss_emd = loss[torch.arange(loss.shape[0]), min_indices] loss_emd = loss_emd.mean() else: loss_emd = bbox_pred.sum() losses['loss_rcnn_emd'] = loss_emd return losses def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor, bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor, labels: Tensor) -> Tensor: """Calculate the emd loss. Note: This implementation is modified from https://github.com/Purkialo/ CrowdDet/blob/master/lib/det_oprs/loss_opr.py Args: bbox_pred_0 (Tensor): Part of regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
cls_score_0 (Tensor): Part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)), where 1 represents the background. bbox_pred_1 (Tensor): The other part of regression prediction results, has shape (batch_size*num_proposals_single_image, 4). cls_score_1 (Tensor):The other part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)). targets (Tensor):Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y], k represents the number of prediction boxes generated by each proposal box. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). Returns: torch.Tensor: The calculated loss. """ bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1], dim=1).reshape(-1, bbox_pred_0.shape[-1]) cls_score = torch.cat([cls_score_0, cls_score_1], dim=1).reshape(-1, cls_score_0.shape[-1]) targets = targets.reshape(-1, 4) labels = labels.long().flatten() # masks valid_masks = labels >= 0 fg_masks = labels > 0 # multiple class bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4) fg_gt_classes = labels[fg_masks] bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :] # loss for regression loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks]) loss_bbox = loss_bbox.sum(dim=1) # loss for classification labels = labels * valid_masks loss_cls = self.loss_cls(cls_score, labels) loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox loss = loss_cls.reshape(-1, 2).sum(dim=1) return loss.reshape(-1, 1) def _predict_by_feat_single( self, roi: Tensor, cls_score: Tensor, bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas. has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. 
# It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). """ bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
overlap = bbox_overlaps(basement_bbox, ruler_bbox)
4
2023-12-11 15:23:03+00:00
24k
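The emd_loss pairing in the row above computes the set loss for both orderings of the two predictions and keeps, per proposal, whichever assignment is cheaper. A toy numeric walk-through of that min step, with made-up loss values:

import torch

# Made-up per-proposal costs: loss_0 is the identity matching, loss_1 the swapped one.
loss_0 = torch.tensor([[1.2], [0.7]])
loss_1 = torch.tensor([[0.9], [1.5]])
loss = torch.cat([loss_0, loss_1], dim=1)                  # shape (2 proposals, 2 orderings)
_, min_indices = loss.min(dim=1)                           # tensor([1, 0])
loss_emd = loss[torch.arange(loss.shape[0]), min_indices]  # tensor([0.9000, 0.7000])
print(loss_emd.mean())                                     # tensor(0.8000)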
open-mmlab/PIA
animatediff/pipelines/i2v_pipeline.py
[ { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f)...
import inspect import os.path as osp import numpy as np import torch from dataclasses import dataclass from typing import Callable, List, Optional, Union from diffusers.configuration_utils import FrozenDict from diffusers.loaders import IPAdapterMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL from diffusers.pipelines import DiffusionPipeline from diffusers.schedulers import (DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler) from diffusers.utils import (BaseOutput, deprecate, is_accelerate_available, logging) from diffusers.utils.import_utils import is_xformers_available from einops import rearrange from omegaconf import OmegaConf from packaging import version from safetensors import safe_open from tqdm import tqdm from transformers import (CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection) from animatediff.models.resnet import InflatedConv3d from animatediff.models.unet import UNet3DConditionModel from animatediff.utils.convert_from_ckpt import (convert_ldm_clip_checkpoint, convert_ldm_unet_checkpoint, convert_ldm_vae_checkpoint) from animatediff.utils.convert_lora_safetensor_to_diffusers import \ convert_lora_model_level from animatediff.utils.util import prepare_mask_coef_by_statistics from accelerate import cpu_offload
14,784
"""Encode image for ip-adapter. Copied from https://github.com/huggingface/diffusers/blob/f9487783228cd500a21555da3346db40e8f05992/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L492-L514 # noqa """ dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds @torch.no_grad() def __call__( self, image: np.ndarray, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, global_inf_num: int = 0, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cond_frame: int = 0, mask_sim_template_idx: int = 0, ip_adapter_scale: float = 0, strength: float = 1, progress_fn=None, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor assert strength > 0 and strength <= 1, ( f'"strength" for img2vid must in (0, 1]. But receive {strength}.') # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is None: negative_prompt = DEFAULT_N_PROMPT negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) #timesteps = self.scheduler.timesteps timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( latent_timestep, batch_size * num_videos_per_prompt, 4, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) raw_image = image.copy() image = torch.from_numpy(image)[None, ...].permute(0, 3, 1, 2) image = image / 255 # [0, 1] image = image * 2 - 1 # [-1, 1] image = image.to(device=device, dtype=self.vae.dtype) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0) else: image_latent = self.vae.encode(image).latent_dist.sample(generator) image_latent = image_latent.to(device=device, dtype=self.unet.dtype) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device=device, dtype=self.unet.dtype) # prepare mask
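The guidance_scale comment above is the standard classifier-free-guidance formulation; a minimal sketch of the update it implies, using random tensors as stand-ins for the UNet's conditional and unconditional noise predictions (the pipeline itself batches the two passes together):

import torch

noise_pred_uncond = torch.randn(1, 4, 16, 32, 32)  # pass with the negative prompt
noise_pred_text = torch.randn(1, 4, 16, 32, 32)    # pass with the text prompt
guidance_scale = 7.5
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale == 1.0 reduces this to noise_pred_text, i.e. no classifier-free guidance.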
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], # memory_format: torch.memory_format, feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # self.memory_format = memory_format self.use_ip_adapter = False @classmethod def build_pipeline(cls, base_cfg, base_model: str, unet_path: str, dreambooth_path: Optional[str] = None, lora_path: Optional[str] = None, lora_alpha: float = 0, vae_path: Optional[str] = None, ip_adapter_path: Optional[str] = None, ip_adapter_scale: float = 0.0, only_load_vae_decoder: bool = False, only_load_vae_encoder: bool = False) -> 'I2VPipeline': """Method to build pipeline in a faster way~ Args: base_cfg: The config to build model base_model: The model id to initialize StableDiffusion unet_path: Path for i2v unet dreambooth_path: path for dreambooth model lora_path: path for lora model lora_alpha: value for lora scale only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt and maintain encoder as original.
""" # build unet unet = UNet3DConditionModel.from_pretrained_2d( base_model, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container( base_cfg.unet_additional_kwargs)) old_weights = unet.conv_in.weight old_bias = unet.conv_in.bias new_conv1 = InflatedConv3d( 9, old_weights.shape[0], kernel_size=unet.conv_in.kernel_size, stride=unet.conv_in.stride, padding=unet.conv_in.padding, bias=True if old_bias is not None else False) param = torch.zeros((320,5,3,3),requires_grad=True) new_conv1.weight = torch.nn.Parameter(torch.cat((old_weights,param),dim=1)) if old_bias is not None: new_conv1.bias = old_bias unet.conv_in = new_conv1 unet.config["in_channels"] = 9 unet_ckpt = torch.load(unet_path, map_location='cpu') unet.load_state_dict(unet_ckpt, strict=False) # NOTE: only load temporal layers and condition module # for key, value in unet_ckpt.items(): # if 'motion' in key or 'conv_in' in key: # unet.state_dict()[key].copy_(value) # load vae, tokenizer, text encoder vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae") tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder") noise_scheduler = DDIMScheduler(**OmegaConf.to_container(base_cfg.noise_scheduler_kwargs)) if dreambooth_path: print(" >>> Begin loading DreamBooth >>>") base_model_state_dict = {} with safe_open(dreambooth_path, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) # load unet converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, unet.config) old_value = converted_unet_checkpoint['conv_in.weight'] new_param = unet_ckpt['conv_in.weight'][:,4:,:,:].clone().cpu() new_value = torch.nn.Parameter(torch.cat((old_value, new_param), dim=1)) converted_unet_checkpoint['conv_in.weight'] = new_value unet.load_state_dict(converted_unet_checkpoint, strict=False) # load vae converted_vae_checkpoint = convert_ldm_vae_checkpoint( base_model_state_dict, vae.config, only_decoder=only_load_vae_decoder, only_encoder=only_load_vae_encoder,) need_strict = not (only_load_vae_decoder or only_load_vae_encoder) vae.load_state_dict(converted_vae_checkpoint, strict=need_strict) print('Prefix in loaded VAE checkpoint: ') print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()])) # load text encoder text_encoder_checkpoint = convert_ldm_clip_checkpoint(base_model_state_dict) if text_encoder_checkpoint: text_encoder.load_state_dict(text_encoder_checkpoint, strict=False) print(" <<< Loaded DreamBooth <<<") if vae_path: print(' >>> Begin loading VAE >>>') vae_state_dict = {} if vae_path.endswith('safetensors'): with safe_open(vae_path, framework="pt", device="cpu") as f: for key in f.keys(): vae_state_dict[key] = f.get_tensor(key) elif vae_path.endswith('ckpt') or vae_path.endswith('pt'): vae_state_dict = torch.load(vae_path, map_location='cpu') if 'state_dict' in vae_state_dict: vae_state_dict = vae_state_dict['state_dict'] vae_state_dict = {f'first_stage_model.{k}': v for k, v in vae_state_dict.items()} converted_vae_checkpoint = convert_ldm_vae_checkpoint( vae_state_dict, vae.config, only_decoder=only_load_vae_decoder, only_encoder=only_load_vae_encoder,) print('Prefix in loaded VAE checkpoint: ') print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()])) need_strict = not (only_load_vae_decoder or only_load_vae_encoder) vae.load_state_dict(converted_vae_checkpoint, strict=need_strict) print(" <<< Loaded VAE <<<") if lora_path: print(" >>> 
Begin loading LoRA >>>") lora_dict = {} with safe_open(lora_path, framework='pt', device='cpu') as file: for k in file.keys(): lora_dict[k] = file.get_tensor(k) unet, text_encoder = convert_lora_model_level( lora_dict, unet, text_encoder, alpha=lora_alpha) print(" <<< Loaded LoRA <<<") # move model to device device = torch.device('cuda') unet_dtype = torch.float16 tenc_dtype = torch.float16 vae_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32 unet = unet.to(device=device, dtype=unet_dtype) text_encoder = text_encoder.to(device=device, dtype=tenc_dtype) vae = vae.to(device=device, dtype=vae_dtype) print(f'Set Unet to {unet_dtype}') print(f'Set text encoder to {tenc_dtype}') print(f'Set vae to {vae_dtype}') if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() pipeline = cls(unet=unet, vae=vae, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=noise_scheduler) # ip_adapter_path = 'h94/IP-Adapter' if ip_adapter_path and ip_adapter_scale > 0: ip_adapter_name = 'ip-adapter_sd15.bin' # only an online repo needs a subfolder if not osp.isdir(ip_adapter_path): subfolder = 'models' else: subfolder = '' pipeline.load_ip_adapter(ip_adapter_path, subfolder, ip_adapter_name) pipeline.set_ip_adapter_scale(ip_adapter_scale) pipeline.use_ip_adapter = True print(f'Load IP-Adapter, scale: {ip_adapter_scale}') # text_inversion_path = './models/TextualInversion/easynegative.safetensors' # if text_inversion_path: # pipeline.load_textual_inversion(text_inversion_path, 'easynegative') return pipeline def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if not is_accelerate_available(): raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, add_noise_time_step, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) return latents def encode_image(self, image, device, num_images_per_prompt): """Encode image for ip-adapter. 
Copied from https://github.com/huggingface/diffusers/blob/f9487783228cd500a21555da3346db40e8f05992/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L492-L514 # noqa """ dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds @torch.no_grad() def __call__( self, image: np.ndarray, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, global_inf_num: int = 0, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cond_frame: int = 0, mask_sim_template_idx: int = 0, ip_adapter_scale: float = 0, strength: float = 1, progress_fn=None, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor assert strength > 0 and strength <= 1, ( f'"strength" for img2vid must in (0, 1]. But receive {strength}.') # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is None: negative_prompt = DEFAULT_N_PROMPT negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) #timesteps = self.scheduler.timesteps timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( latent_timestep, batch_size * num_videos_per_prompt, 4, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) raw_image = image.copy() image = torch.from_numpy(image)[None, ...].permute(0, 3, 1, 2) image = image / 255 # [0, 1] image = image * 2 - 1 # [-1, 1] image = image.to(device=device, dtype=self.vae.dtype) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0) else: image_latent = self.vae.encode(image).latent_dist.sample(generator) image_latent = image_latent.to(device=device, dtype=self.unet.dtype) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device=device, dtype=self.unet.dtype) # prepare mask
mask_coef = prepare_mask_coef_by_statistics(video_length, cond_frame, mask_sim_template_idx)
6
2023-12-21 03:29:34+00:00
24k
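The strength handling in get_timesteps above is worth checking by hand; a short worked example with num_inference_steps=50 and strength=0.6:

num_inference_steps, strength = 50, 0.6
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
t_start = max(num_inference_steps - init_timestep, 0)                          # 20
# timesteps = self.scheduler.timesteps[t_start:] keeps only the last 30 of the
# 50 scheduled steps, so a lower strength preserves more of the encoded image.
print(t_start, num_inference_steps - t_start)  # 20 30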
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n ...
import itertools import torch import torch.nn as nn from dataclasses import dataclass from diffusers import DDIMScheduler from einops import rearrange from omegaconf import OmegaConf from ldm.lora import ( inject_trainable_lora_extended, monkeypatch_remove_lora, save_lora_weight, ) from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import load_model_from_config from util.pose import make_T from util.typing import * from util.util import default
14,862
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32
self.model: LatentDiffusion = load_model_from_config(
3
2023-12-17 12:45:38+00:00
24k
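Zero123's Config dataclass wrapped in OmegaConf.structured is a common typed-config pattern; a minimal standalone sketch (defaults copied from the class above, the override value is invented):

from dataclasses import dataclass
from omegaconf import OmegaConf

@dataclass
class Config:
    pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt"
    min_step_percent: float = 0.02
    max_step_percent: float = 0.98

# Overrides arrive as kwargs (cf. Zero123.__init__'s **kwargs) and are
# type-checked against the dataclass fields by OmegaConf.
config = OmegaConf.structured(Config(min_step_percent=0.05))
print(config.min_step_percent)  # 0.05
print(config.max_step_percent)  # 0.98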
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n ...
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
14,499
cv2.setNumThreads(1)


def collate_fn(
    batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1
):
    image_path_list = []
    images_list = []
    images_clip_list = []
    conversation_list = []
    masks_list = []
    label_list = []
    bboxes_labels_list = []
    bboxes_valid_list = []
    masks_valid_list = []
    resize_list = []
    questions_list = []
    sampled_classes_list = []
    offset_list = [0]
    cnt = 0
    inferences = []
    for (
        image_path,
        images,
        images_clip,
        conversations,
        masks,
        label,
        bboxes_labels,
        bboxes_valid,
        masks_valid,
        resize,
        questions,
        sampled_classes,
        inference,
    ) in batch:
        image_path_list.append(image_path)
        images_list.append(images)
        images_clip_list.append(images_clip)
        conversation_list.extend(conversations)
        label_list.append(label)
        masks_list.append(masks.float())
        bboxes_labels_list.extend(bboxes_labels)
        bboxes_valid_list.extend(bboxes_valid)
        masks_valid_list.append(torch.tensor(masks_valid))
        resize_list.append(resize)
        questions_list.append(questions)
        sampled_classes_list.append(sampled_classes)
        cnt += len(conversations)
        offset_list.append(cnt)
        inferences.append(inference)

    if use_mm_start_end:
        # replace <image> token
        for i in range(len(conversation_list)):
            replace_token = DEFAULT_IMAGE_TOKEN
            replace_token = (
                DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            )
            conversation_list[i] = conversation_list[i].replace(
                DEFAULT_IMAGE_TOKEN, replace_token
            )

    input_ids = [
        tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
        for prompt in conversation_list
    ]
    input_ids = torch.nn.utils.rnn.pad_sequence(
        input_ids, batch_first=True, padding_value=tokenizer.pad_token_id
    )
    attention_masks = input_ids.ne(tokenizer.pad_token_id)

    for i in range(len(bboxes_valid_list)):
        bboxes_valid = bboxes_valid_list[i]
        attention_mask = attention_masks[i]
        if not bboxes_valid:
            attention_mask = attention_mask & input_ids[i].ne(
                tokenizer("[LOC]", add_special_tokens=False).input_ids[0]
            )
        attention_masks[i] = attention_mask
conv = conversation_lib.default_conversation.copy()
11
2023-12-15 14:58:24+00:00
24k
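The collate_fn in the record above pads variable-length token tensors with torch.nn.utils.rnn.pad_sequence and then derives the attention mask by comparing against the pad id. That padding step on its own, with made-up token values and 0 standing in for pad_token_id:

import torch

a = torch.tensor([5, 6, 7])
b = torch.tensor([8, 9])

# right-pad every sequence to the longest one in the batch
batch = torch.nn.utils.rnn.pad_sequence([a, b], batch_first=True, padding_value=0)
# batch == [[5, 6, 7],
#           [8, 9, 0]]
attention_mask = batch.ne(0)  # True on real tokens, False on padding
print(attention_mask.tolist())  # [[True, True, True], [True, True, False]]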
sinoyou/nelf-pro
nerfstudio/data/dataparsers/nelfpro_dataparser.py
[ { "identifier": "plot_point3d", "path": "nerfstudio/utils/plotly_utils_nelfpro.py", "snippet": "def plot_point3d(xyz, color):\n point_cloud_size = 0.8\n cam_centers_go = go.Scatter3d(\n x = xyz[:, 0], \n y = xyz[:, 1], \n z = xyz[:, 2], \n mode=\"markers\",\n nam...
from curses import meta
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Type
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.utils.plotly_utils_nelfpro import plot_point3d
from nerfstudio.data.utils.probe_sample import FactorPoseGenerator
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
    DataParser,
    DataParserConfig,
    DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.data.dataparsers.raw_dataset_loader.llff_dataset_raw_loader import LLFFRawLoader
from nerfstudio.data.dataparsers.raw_dataset_loader.kitti360_dataset_raw_loader import KITTI360RawLoader
from nerfstudio.data.dataparsers.raw_dataset_loader.bungee_dataset_raw_loader import BungeeRawLoader
from nerfstudio.data.dataparsers.raw_dataset_loader.nerf_dataset_raw_loader import NeRFStudioRawLoader
import numpy as np
import torch
20,994
from __future__ import annotations

CONSOLE = Console(width=120)


@dataclass
class NeLFProDataParserConfig(DataParserConfig):
    """Configuration for the SpherRiFDataParser."""

    _target: Type = field(default_factory=lambda: NeLFProDataParser)

    # raw dataset loader config
    raw_loader: Literal["llff", "kitti360", "bungee", "nerfstudio"] = "llff"
    data: Path = Path("./data/please_fill_in_the_path_to_your_raw_dataset")
    eval_interval: int = 8
    eval_type: Literal["dev"] = "dev"

    # camera pose config
    scale_factor: float = 1.0
    downscale_factor: Optional[int] = None
    scenebox_scale: float = 1.0
    orientation_method: Literal["none", "up", "pca"] = "up"
    center_poses: bool = True
    auto_scale_poses: bool = True

    # probe generation config
    data_num_core: int = 3
    data_num_basis: int = 64
    use_kmeans_core: bool = True
    use_fps_basis: bool = True
    factor_pos_noise_scale: float = 0.02

    # point cloud config
    point_cloud_sample_num: int = -1


@dataclass
class NeLFProDataParser(DataParser):
    """Dataset Parser for Raw Mipnerf360 dataset."""

    config: NeLFProDataParserConfig
    downscale_factor: Optional[int] = None

    def _generate_dataparser_outputs(self, split="train"):
        data_dir = Path(self.config.data)
        if self.config.raw_loader == "llff":
            raw_loader = LLFFRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None)
        elif self.config.raw_loader == "kitti360":
            raw_loader = KITTI360RawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None, eval_type=self.config.eval_type)
        elif self.config.raw_loader == 'bungee':
            raw_loader = BungeeRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None)
        elif self.config.raw_loader == "nerfstudio":
raw_loader = NeRFStudioRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None)
12
2023-12-15 20:07:22+00:00
24k
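center_poses and auto_scale_poses in the config above refer to the usual normalization applied before NeRF-style training: shift the camera origins to their centroid and rescale them into a unit-sized region. A rough, self-contained approximation of that step (not the exact nerfstudio implementation):

import torch

def normalize_poses(c2w: torch.Tensor) -> torch.Tensor:
    """c2w: (N, 3, 4) camera-to-world matrices; returns centered, rescaled copies."""
    out = c2w.clone()
    centroid = out[:, :3, 3].mean(dim=0)
    out[:, :3, 3] -= centroid            # center_poses
    scale = out[:, :3, 3].abs().max()
    if scale > 0:
        out[:, :3, 3] /= scale           # auto_scale_poses
    return out

poses = torch.randn(10, 3, 4)
normed = normalize_poses(poses)
print(normed[:, :3, 3].abs().max())  # <= 1.0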
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.d...
from data.dataloader_Fishbowl import FishBowl
from data.dataloader_MOViD_A import MOViD_A
from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection
from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21,188
def load_dataset(config, args, mode):
    if mode == "train":
        if args.dataset == "KINS":
            train_dataset = Kins_Fusion_dataset(config, mode='train')
            test_dataset = Kins_Fusion_dataset(config, mode='test')
        elif args.dataset == "COCOA":
train_dataset = COCOA_Fusion_dataset(config, mode='train')
4
2023-12-21 04:25:47+00:00
24k
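load_dataset above dispatches on args.dataset with an if/elif chain; the same mapping can be kept declarative with a name-to-class dict, which turns adding a dataset into a one-line change. A sketch of that alternative (the class names follow the record's imports, but the refactor itself is ours, not the repo's):

TRAIN_DATASETS = {
    "KINS": Kins_Fusion_dataset,
    "COCOA": COCOA_Fusion_dataset,
}

def load_train_dataset(config, name):
    try:
        dataset_cls = TRAIN_DATASETS[name]
    except KeyError:
        raise ValueError(f"unknown dataset: {name}")
    # mirror the source: a train split plus a test split from the same class
    return dataset_cls(config, mode="train"), dataset_cls(config, mode="test")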
alipay/PainlessInferenceAcceleration
pia/lookahead/models/baichuan/modeling_baichuan.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = False\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n d...
import math
import os
import torch
import torch.utils.checkpoint
from contextlib import contextmanager
from threading import Thread
from typing import List, Optional, Tuple, Union
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers import PretrainedConfig
from transformers.activations import ACT2FN
from transformers.generation.utils import GenerationConfig
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.utils import logging, ContextManagers
from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel
from pia.lookahead.models.baichuan.configuration_baichuan import BaichuanConfig
from pia.lookahead.models.baichuan.generation_utils import build_chat_input, TextIterStreamer
from xformers import ops as xops
from .quantizer import quantize_offline, init_model_weight_int4
from .quantizer import init_model_weight_int4
from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map
from accelerate.utils import CustomDtype
from accelerate.utils import get_balanced_memory
from .quantizer import quantize_online
18,125
if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() proj = self.W_pack(hidden_states) proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask() ) else: with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class DecoderLayer(nn.Module): def __init__(self, config: BaichuanConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Attention(config=config) self.mlp = MLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = 
self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
# Copyright 2023 Baichuan Inc. All Rights Reserved. # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) try: except ImportError: xops = None logger.warning( "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers." ) # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" if len(mask.size()) == 3: bsz, src_len, _ = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, :, :].expand(bsz, 1, tgt_len, src_len).to(dtype) else: bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class RotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.max_seq_len_cached = max_position_embeddings t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32).to(x.device) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32).to(x.device) elif self.cos_cached.device != x.device: self.cos_cached = self.cos_cached.to(x.device) self.sin_cached = self.sin_cached.to(x.device) return ( self.cos_cached[:, :, :seq_len, ...], self.sin_cached[:, :, :seq_len, ...], ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids): cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin) k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin) return q_embed.to(q.dtype), k_embed.to(k.dtype) class MLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: BaichuanConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() proj = self.W_pack(hidden_states) proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask() ) else: with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class DecoderLayer(nn.Module): def __init__(self, config: BaichuanConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Attention(config=config) self.mlp = MLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, 
position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
class BaichuanPreTrainedModel(LookaheadPreTrainedModel):
0
2023-12-19 13:11:38+00:00
24k
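RotaryEmbedding and apply_rotary_pos_emb in the Baichuan record implement standard rotary position embeddings: build cos/sin tables from inverse frequencies over the head dimension, then rotate each query/key half-pair. The same math, condensed into a runnable sketch with arbitrary dimensions:

import torch

def rope_tables(dim, seq_len, base=10000):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    t = torch.arange(seq_len, dtype=torch.float32)
    freqs = torch.outer(t, inv_freq)         # (seq_len, dim/2)
    emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, dim)
    return emb.cos(), emb.sin()

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rope(q, cos, sin):
    # q: (batch, heads, seq, head_dim); cos/sin broadcast over batch and heads
    return q * cos + rotate_half(q) * sin

cos, sin = rope_tables(dim=64, seq_len=16)
q = torch.randn(2, 8, 16, 64)
print(apply_rope(q, cos, sin).shape)  # torch.Size([2, 8, 16, 64])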
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops
import torch
import torch as th
import torch.nn as nn
from ldm.modules.diffusionmodules.util import (
    conv_nd,
    linear,
    zero_module,
    timestep_embedding,
)
from einops import rearrange, repeat
from torchvision.utils import make_grid
from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus
from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, exists, instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
18,218
self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
9
2023-12-16 03:31:33+00:00
24k
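The UNet constructor in the record above accepts either num_heads or num_head_channels (exactly one must be set) and derives the other from each block's channel width. That resolution logic, extracted into a toy helper with invented channel counts:

def resolve_heads(ch, num_heads=-1, num_head_channels=-1):
    """Return (num_heads, dim_head) for an attention block with ch channels."""
    if num_head_channels == -1:
        assert num_heads != -1, "set either num_heads or num_head_channels"
        return num_heads, ch // num_heads
    # a fixed per-head width: the head count scales with the feature width
    return ch // num_head_channels, num_head_channels

print(resolve_heads(320, num_heads=8))           # (8, 40)
print(resolve_heads(640, num_head_channels=64))  # (10, 64)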
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME...
from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
    MetadataCatalog,
    build_detection_train_loader,
    build_detection_test_loader,
)
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.modeling import build_model
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
    inference_on_dataset,
    print_csv_format,
    DatasetEvaluator,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from fvcore.nn.precise_bn import get_bn_modules
from clouds import (
    CityscapesSemSegEvaluator,
    ClassicalSemSegEvaluator,
    MapperTrain,
    MapperTest,
    add_maskformer2_config,
    add_clouds_config,
    add_wandb_config,
    add_prerocessing_training_set_config,
    PersoEvalHook,
    add_repeat_factors,
)
from clouds.utils import setup_wandb, WandbWriter
import warnings
import copy
import itertools
import logging
import os
import ast
import torch
import detectron2.utils.comm as comm
14,542
@classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
add_clouds_config(cfg)
1
2023-12-15 15:40:58+00:00
24k
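build_optimizer in the CLOUDS record wraps the optimizer class so gradient clipping sees every parameter of the model at once, rather than clipping each param group separately as detectron2's stock helper does. A stripped-down version of that wrapper (hyperparameters invented):

import itertools
import torch

def with_full_model_clipping(optim_cls, clip_norm_val):
    """Return an optimizer class whose step() first clips the global grad norm."""
    class ClippingOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return ClippingOptimizer

model = torch.nn.Linear(4, 4)
opt = with_full_model_clipping(torch.optim.SGD, clip_norm_val=1.0)(
    model.parameters(), lr=0.1
)
model(torch.randn(2, 4)).sum().backward()
opt.step()  # clips the norm over all parameters, then applies SGD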
modelscope/scepter
scepter/modules/model/network/ldm/ldm_sce.py
[ { "identifier": "ANNOTATORS", "path": "scepter/modules/annotator/registry.py", "snippet": "ANNOTATORS = Registry('ANNOTATORS', build_func=build_annotator)" }, { "identifier": "MODELS", "path": "scepter/modules/model/registry.py", "snippet": "MODELS = Registry('MODELS', build_func=build_m...
import copy
import torch
import torch.nn as nn
import torchvision.transforms as TT
from scepter.modules.annotator.registry import ANNOTATORS
from scepter.modules.model.registry import MODELS, TUNERS
from scepter.modules.utils.config import Config, dict_to_yaml
from .ldm import LatentDiffusion
from .ldm_xl import LatentDiffusionXL
14,472
# -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
@MODELS.register_class()
class LatentDiffusionSCETuning(LatentDiffusion):
    para_dict = {}
    para_dict.update(LatentDiffusion.para_dict)

    def __init__(self, cfg, logger):
        super().__init__(cfg, logger=logger)

    def init_params(self):
        super().init_params()
        self.tuner_model_config = self.cfg.TUNER_MODEL

    def construct_network(self):
        super().construct_network()
        input_block_channels = self.model._input_block_chans
        sc_tuner_cfg = self.tuner_model_config['SC_TUNER_CFG']
        use_layers = self.tuner_model_config.get('USE_LAYERS', None)
        lsc_tuner_blocks = nn.ModuleList([])
        for i, chan in enumerate(input_block_channels[::-1]):
            if use_layers and i not in use_layers:
                lsc_tuner_blocks.append(nn.Identity())
                continue
            tuner_cfg = copy.deepcopy(sc_tuner_cfg)
            tuner_cfg['DIM'] = chan
            tuner_cfg['TUNER_LENGTH'] = int(chan * tuner_cfg.get('DOWN_RATIO', 1.0))
            sc_tuner = TUNERS.build(tuner_cfg, logger=self.logger)
            lsc_tuner_blocks.append(sc_tuner)
        self.model.lsc_identity = lsc_tuner_blocks

    def save_pretrained(self, *args, destination=None, prefix='', keep_vars=False):
        save_state = {
            key: value
            for key, value in self.state_dict().items()
            if 'lsc_identity' in key
        }
        return save_state

    def save_pretrained_config(self):
        return copy.deepcopy(self.cfg.TUNER_MODEL.cfg_dict)

    @staticmethod
    def get_config_template():
return dict_to_yaml('MODELS',
4
2023-12-21 02:01:48+00:00
24k
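save_pretrained above checkpoints only the tuner weights by filtering state_dict keys on a marker substring ('lsc_identity'), leaving the frozen base model to be reloaded from its own checkpoint. The same trick in miniature, with invented module and marker names:

import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(8, 8)    # frozen base weights
        self.tuner_head = nn.Linear(8, 8)  # the only part we train

model = Model()
# keep just the tuner parameters, keyed by the 'tuner_' marker
tuner_state = {k: v for k, v in model.state_dict().items() if "tuner_" in k}
torch.save(tuner_state, "tuner_only.pt")

# restoring: strict=False lets the backbone keep separately loaded weights
fresh = Model()
missing, unexpected = fresh.load_state_dict(torch.load("tuner_only.pt"), strict=False)
print(sorted(tuner_state))            # ['tuner_head.bias', 'tuner_head.weight']
print(len(missing), len(unexpected))  # backbone keys missing, nothing unexpected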
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(...
from typing import Dict, List, Optional
from .layer_norm import LayerNorm
from .multihead_attention import MultiheadAttention
from .relational_memory import RelationalMemory
from .group_linear_layer import GroupLinearLayer
from .basic_mha import MemoryAttention
from .quant_noise import quant_noise
from .fairseq_dropout import FairseqDropout
from torch import Tensor
import torch
import torch.nn as nn
import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils
import random
import torch.nn.functional as F
15,265
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

#from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer


class TransformerEncoderLayerVanilla(nn.Module):
    """Encoder layer block.

    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.

    Args:
        args (argparse.Namespace): parsed command-line arguments
    """

    def __init__(self, args, out_proj = None):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)
0
2023-12-15 13:13:01+00:00
24k
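The docstring in the record above contrasts the post-norm ordering of the original paper (dropout -> add residual -> layernorm) with the tensor2tensor pre-norm ordering toggled by args.encoder_normalize_before. A small self-contained sketch of both orderings around a generic sublayer, assuming only torch:

import torch
import torch.nn as nn

def sublayer_with_residual(x, sublayer, norm, dropout, normalize_before: bool):
    """Apply `sublayer` with the residual/layernorm ordering the docstring describes."""
    residual = x
    if normalize_before:       # tensor2tensor style: norm -> sublayer -> dropout -> add
        x = norm(x)
    x = sublayer(x)
    x = dropout(x)
    x = residual + x
    if not normalize_before:   # original paper: sublayer -> dropout -> add -> norm
        x = norm(x)
    return x

embed_dim = 16
norm = nn.LayerNorm(embed_dim, eps=1e-5)
ffn = nn.Sequential(nn.Linear(embed_dim, 64), nn.ReLU(), nn.Linear(64, embed_dim))
drop = nn.Dropout(0.1)

x = torch.randn(4, embed_dim)
y_post = sublayer_with_residual(x, ffn, norm, drop, normalize_before=False)
y_pre = sublayer_with_residual(x, ffn, norm, drop, normalize_before=True)
print(y_post.shape, y_pre.shape)  # torch.Size([4, 16]) torch.Size([4, 16])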
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in...
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
15,176
base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor] self.freq_filter = get_freq_filter( shape, device="cuda", filter_type=self.selected_filter_type, n=4, d_s=self.set_d_s, d_t=self.set_d_t ) def animate( self, base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox, # freeinit params filter_type_dropdown, d_s_slider, d_t_slider, num_iters_slider, # speed up speed_up_options ): # set global seed set_seed(42) d_s = float(d_s_slider) d_t = float(d_t_slider) num_iters = int(num_iters_slider) if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown) if self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown) self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s self.set_d_t = d_t if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t: self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t) if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention() pipeline = AnimationFreeInitPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=DDIMScheduler(**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs)) ).to("cuda") # (freeinit) initialize frequency filter for noise reinitialization ------------- pipeline.freq_filter = self.freq_filter # ------------------------------------------------------------------------------- if int(seed_textbox) > 0: seed = int(seed_textbox) else: seed = random.randint(1, 1e16) torch.manual_seed(int(seed)) assert seed == torch.initial_seed() print(f"### seed: {seed}") generator = torch.Generator(device="cuda") generator.manual_seed(seed) sample_output = pipeline( prompt_textbox, negative_prompt = negative_prompt_textbox, num_inference_steps = 25, 
guidance_scale = 7.5, width = width_slider, height = height_slider, video_length = 16, num_iters = num_iters, use_fast_sampling = True if "use_coarse_to_fine_sampling" in speed_up_options else False, save_intermediate = False, return_orig = True, use_fp16 = True if "use_fp16" in speed_up_options else False ) orig_sample = sample_output.orig_videos sample = sample_output.videos save_sample_path = os.path.join(self.savedir, f"sample.mp4")
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor] self.freq_filter = get_freq_filter( shape, device="cuda", filter_type=self.selected_filter_type, n=4, d_s=self.set_d_s, d_t=self.set_d_t ) def animate( self, base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox, # freeinit params filter_type_dropdown, d_s_slider, d_t_slider, num_iters_slider, # speed up speed_up_options ): # set global seed set_seed(42) d_s = float(d_s_slider) d_t = float(d_t_slider) num_iters = int(num_iters_slider) if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown) if 
self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown) self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s self.set_d_t = d_t if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t: self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t) if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention() pipeline = AnimationFreeInitPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=DDIMScheduler(**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs)) ).to("cuda") # (freeinit) initialize frequency filter for noise reinitialization ------------- pipeline.freq_filter = self.freq_filter # ------------------------------------------------------------------------------- if int(seed_textbox) > 0: seed = int(seed_textbox) else: seed = random.randint(1, 1e16) torch.manual_seed(int(seed)) assert seed == torch.initial_seed() print(f"### seed: {seed}") generator = torch.Generator(device="cuda") generator.manual_seed(seed) sample_output = pipeline( prompt_textbox, negative_prompt = negative_prompt_textbox, num_inference_steps = 25, guidance_scale = 7.5, width = width_slider, height = height_slider, video_length = 16, num_iters = num_iters, use_fast_sampling = True if "use_coarse_to_fine_sampling" in speed_up_options else False, save_intermediate = False, return_orig = True, use_fp16 = True if "use_fp16" in speed_up_options else False ) orig_sample = sample_output.orig_videos sample = sample_output.videos save_sample_path = os.path.join(self.savedir, f"sample.mp4")
save_videos_grid(sample, save_sample_path)
2
2023-12-19 21:06:32+00:00
24k
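The update_filter method in the record above derives the latent shape from the VAE config (vae_scale_factor = 2 ** (len(block_out_channels) - 1), i.e. 8 for a four-level VAE) and rebuilds the FreeInit frequency filter when resolution or filter settings change. Note the flattened animate method assigns the cached values before comparing them, so its "settings changed" test can never fire; below is a sketch of the intended compare-then-assign caching, with make_filter as a hypothetical stand-in for get_freq_filter:

def make_filter(shape, filter_type, d_s, d_t):
    # Hypothetical stand-in for get_freq_filter; returns something cheap to inspect.
    return ("filter", tuple(shape), filter_type, d_s, d_t)

class FilterCache:
    def __init__(self, vae_scale_factor: int = 8, video_length: int = 16):
        self.vae_scale_factor = vae_scale_factor
        self.video_length = video_length
        self.settings = None        # (width, height, filter_type, d_s, d_t)
        self.freq_filter = None

    def get(self, width, height, filter_type, d_s, d_t):
        settings = (width, height, filter_type, d_s, d_t)
        if settings != self.settings:        # compare BEFORE overwriting the cache
            shape = [1, 4, self.video_length,
                     width // self.vae_scale_factor,
                     height // self.vae_scale_factor]
            self.freq_filter = make_filter(shape, filter_type, d_s, d_t)
            self.settings = settings
        return self.freq_filter

cache = FilterCache()
f1 = cache.get(512, 512, "butterworth", 0.25, 0.25)
f2 = cache.get(512, 512, "butterworth", 0.25, 0.25)   # settings unchanged -> reused
f3 = cache.get(512, 768, "butterworth", 0.25, 0.25)   # shape changed -> rebuilt
print(f1 is f2, f1 == f3)  # True False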
m-abr/FCPCodebase
world/World.py
[ { "identifier": "Logger", "path": "logs/Logger.py", "snippet": "class Logger():\n _folder = None\n\n def __init__(self, is_enabled:bool, topic:str) -> None:\n self.no_of_entries = 0 \n self.enabled = is_enabled\n self.topic = topic\n\n def write(self, msg:str, timestamp:boo...
from collections import deque
from cpp.ball_predictor import ball_predictor
from cpp.localization import localization
from logs.Logger import Logger
from math import atan2, pi
from math_ops.Matrix_4x4 import Matrix_4x4
from world.commons.Draw import Draw
from world.commons.Other_Robot import Other_Robot
from world.Robot import Robot
import numpy as np
17,861
class World():
    STEPTIME = 0.02    # Fixed step time
    STEPTIME_MS = 20   # Fixed step time in milliseconds
    VISUALSTEP = 0.04  # Fixed visual step time
    VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds

    # play modes in our favor
    M_OUR_KICKOFF = 0
    M_OUR_KICK_IN = 1
    M_OUR_CORNER_KICK = 2
    M_OUR_GOAL_KICK = 3
    M_OUR_FREE_KICK = 4
    M_OUR_PASS = 5
    M_OUR_DIR_FREE_KICK = 6
    M_OUR_GOAL = 7
    M_OUR_OFFSIDE = 8

    # play modes in their favor
    M_THEIR_KICKOFF = 9
    M_THEIR_KICK_IN = 10
    M_THEIR_CORNER_KICK = 11
    M_THEIR_GOAL_KICK = 12
    M_THEIR_FREE_KICK = 13
    M_THEIR_PASS = 14
    M_THEIR_DIR_FREE_KICK = 15
    M_THEIR_GOAL = 16
    M_THEIR_OFFSIDE = 17

    # neutral play modes
    M_BEFORE_KICKOFF = 18
    M_GAME_OVER = 19
    M_PLAY_ON = 20

    # play mode groups
    MG_OUR_KICK = 0
    MG_THEIR_KICK = 1
    MG_ACTIVE_BEAM = 2
    MG_PASSIVE_BEAM = 3
    MG_OTHER = 4  # play on, game over

    FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0))
    FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8))

    def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool,
class World():
    STEPTIME = 0.02    # Fixed step time
    STEPTIME_MS = 20   # Fixed step time in milliseconds
    VISUALSTEP = 0.04  # Fixed visual step time
    VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds

    # play modes in our favor
    M_OUR_KICKOFF = 0
    M_OUR_KICK_IN = 1
    M_OUR_CORNER_KICK = 2
    M_OUR_GOAL_KICK = 3
    M_OUR_FREE_KICK = 4
    M_OUR_PASS = 5
    M_OUR_DIR_FREE_KICK = 6
    M_OUR_GOAL = 7
    M_OUR_OFFSIDE = 8

    # play modes in their favor
    M_THEIR_KICKOFF = 9
    M_THEIR_KICK_IN = 10
    M_THEIR_CORNER_KICK = 11
    M_THEIR_GOAL_KICK = 12
    M_THEIR_FREE_KICK = 13
    M_THEIR_PASS = 14
    M_THEIR_DIR_FREE_KICK = 15
    M_THEIR_GOAL = 16
    M_THEIR_OFFSIDE = 17

    # neutral play modes
    M_BEFORE_KICKOFF = 18
    M_GAME_OVER = 19
    M_PLAY_ON = 20

    # play mode groups
    MG_OUR_KICK = 0
    MG_THEIR_KICK = 1
    MG_ACTIVE_BEAM = 2
    MG_PASSIVE_BEAM = 3
    MG_OTHER = 4  # play on, game over

    FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0))
    FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8))

    def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool,
enable_draw:bool, logger:Logger, host:str) -> None:
0
2023-12-16 23:40:23+00:00
24k
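The record above pairs fine-grained play-mode constants (M_*) with coarser play-mode groups (MG_*). A small sketch of classifying a mode into its group with set lookups; the membership sets are my reading of the constant names, not taken from the repo:

# Illustrative grouping of the M_* constants into MG_* groups; the actual
# assignment lives elsewhere in World.py, so this mapping is an assumption.
OUR_KICK_MODES = set(range(0, 7))      # M_OUR_KICKOFF .. M_OUR_DIR_FREE_KICK
THEIR_KICK_MODES = set(range(9, 16))   # M_THEIR_KICKOFF .. M_THEIR_DIR_FREE_KICK

MG_OUR_KICK, MG_THEIR_KICK, MG_OTHER = 0, 1, 4

def play_mode_group(mode: int) -> int:
    if mode in OUR_KICK_MODES:
        return MG_OUR_KICK
    if mode in THEIR_KICK_MODES:
        return MG_THEIR_KICK
    return MG_OTHER                    # play on, game over, goals, beams, ...

print(play_mode_group(1), play_mode_group(10), play_mode_group(20))  # 0 1 4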
Sam-Izdat/tinycio
src/tinycio/tonemapping.py
[ { "identifier": "applyAgX", "path": "src/tinycio/np_agx/agx.py", "snippet": "def applyAgX(array):\n # type: (numpy.ndarray) -> numpy.ndarray\n \"\"\"\n -> take linear - sRGB image data as input\n - apply custom grading if any\n - apply the AgX Punchy view-transform\n - return a display...
import torch
import numpy as np
import typing
from enum import IntEnum
from .np_agx.agx import applyAgX, applyAgXPunchy
from .colorspace import ColorSpace, TransferFunction
17,575
class ToneMapping: """ Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range. Example: .. highlight:: python .. code-block:: python tm = ToneMapping.Variant.HABLE tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm) """ class Variant(IntEnum): """ Tone mapper enum. Available options are: .. highlight:: text .. code-block:: text - NONE - CLAMP - AGX - AGX_PUNCHY - HABLE - REINHARD - ACESCG """ NONE = 1<<0 CLAMP = 1<<1 AGX = 1<<2 AGX_PUNCHY = 1<<3 HABLE = 1<<4 REINHARD = 1<<5 ACESCG = 1<<6 IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD IP_ACESCG = ACESCG OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD OP_ACESCG = ACESCG DISABLED = 0 @classmethod def apply(cls, im:torch.Tensor, tone_mapper:Variant): """ Apply tone mapping to HDR image tensor. Input data is expected to be in the correct color space for the chosen tone mapper. .. note:: :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`. The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically. :param torch.Tensor im: [C=3, H, W] sized image tensor :param ToneMapping.Variant tone_mapper: tonemapper to be used :return: image tensor :rtype: torch.Tensor """ assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}" op, tm = tone_mapper, cls.Variant err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled" if op & tm.DISABLED: raise Exception(err_disabled) if op == tm.NONE: return im elif op == tm.CLAMP: return im.clamp(0., 1.) elif op == tm.AGX: return cls._agx(im) elif op == tm.AGX_PUNCHY: return cls._agx_punchy(im) elif op == tm.HABLE: return cls._hable(im) elif op == tm.REINHARD: return cls._reinhard_extended_luminance(im) elif op == tm.ACESCG: return cls._aces_fitted(im) else: raise Exception(err_not_supported) return out @classmethod def _agx(cls, im:torch.Tensor): device = im.device
class ToneMapping: """ Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range. Example: .. highlight:: python .. code-block:: python tm = ToneMapping.Variant.HABLE tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm) """ class Variant(IntEnum): """ Tone mapper enum. Available options are: .. highlight:: text .. code-block:: text - NONE - CLAMP - AGX - AGX_PUNCHY - HABLE - REINHARD - ACESCG """ NONE = 1<<0 CLAMP = 1<<1 AGX = 1<<2 AGX_PUNCHY = 1<<3 HABLE = 1<<4 REINHARD = 1<<5 ACESCG = 1<<6 IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD IP_ACESCG = ACESCG OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD OP_ACESCG = ACESCG DISABLED = 0 @classmethod def apply(cls, im:torch.Tensor, tone_mapper:Variant): """ Apply tone mapping to HDR image tensor. Input data is expected to be in the correct color space for the chosen tone mapper. .. note:: :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`. The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically. :param torch.Tensor im: [C=3, H, W] sized image tensor :param ToneMapping.Variant tone_mapper: tonemapper to be used :return: image tensor :rtype: torch.Tensor """ assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}" op, tm = tone_mapper, cls.Variant err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled" if op & tm.DISABLED: raise Exception(err_disabled) if op == tm.NONE: return im elif op == tm.CLAMP: return im.clamp(0., 1.) elif op == tm.AGX: return cls._agx(im) elif op == tm.AGX_PUNCHY: return cls._agx_punchy(im) elif op == tm.HABLE: return cls._hable(im) elif op == tm.REINHARD: return cls._reinhard_extended_luminance(im) elif op == tm.ACESCG: return cls._aces_fitted(im) else: raise Exception(err_not_supported) return out @classmethod def _agx(cls, im:torch.Tensor): device = im.device
out = applyAgX(im.permute(1, 2, 0).cpu().numpy())
0
2023-12-15 15:39:08+00:00
24k
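The ToneMapping record above dispatches on an IntEnum whose members double as bit flags, so composites like IP_SRGB_LIN (an OR of several variants) support cheap capability tests. A sketch of the flag test plus a basic global Reinhard operator x / (1 + x); the record's _reinhard_extended_luminance is luminance-based, so this global form is a simplification, and the Variant subset here is trimmed for brevity:

import torch
from enum import IntEnum

class Variant(IntEnum):
    CLAMP = 1 << 1
    HABLE = 1 << 4
    REINHARD = 1 << 5
    ACESCG = 1 << 6
    IP_SRGB_LIN = CLAMP | HABLE | REINHARD   # variants that expect linear-sRGB input

def expects_srgb_linear(op: Variant) -> bool:
    # IntEnum members behave as ints, so the flag test is a plain bitwise AND.
    return bool(op & Variant.IP_SRGB_LIN)

def reinhard(im: torch.Tensor) -> torch.Tensor:
    # Global Reinhard: maps [0, inf) smoothly into [0, 1).
    return im / (1.0 + im)

hdr = torch.tensor([[0.0, 0.5], [1.0, 8.0]])
print(expects_srgb_linear(Variant.REINHARD), expects_srgb_linear(Variant.ACESCG))  # True False
print(reinhard(hdr))  # 0, 1/3, 1/2, 8/9: values approach 1 as input grows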
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.multicontrolnet import ControlNetProcessor  #fix
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
15,878
for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], processors: List[ControlNetProcessor], #fix video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, # controlnet1=processors[0], scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # Prepare first condition condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0 condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0) condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone() # Prepare second condition condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0 condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0) condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone() # Here, we're averaging the two conditions combined_condition = (condition1*8+condition2*2)/10 if do_classifier_free_guidance: combined_condition = torch.cat([combined_condition] * 2) #combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype) return combined_condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep 
timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = 
get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], processors: List[ControlNetProcessor], #fix video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size)
2
2023-12-15 01:22:37+00:00
24k
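The __call__ method in the record above enables classifier-free guidance when guidance_scale > 1.0 and concatenates the unconditional and conditional embeddings into one batch, so a single UNet forward pass yields both predictions. The record is cut off before the denoising loop, so the combine step below is the standard classifier-free guidance formulation rather than a quote of the file:

import torch

def cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """Split a doubled batch into (uncond, cond) halves and blend them.

    noise_pred: [2*b, ...] tensor from one UNet pass over the concatenated batch.
    """
    noise_uncond, noise_cond = noise_pred.chunk(2, dim=0)
    # guidance_scale == 1.0 reduces to the conditional prediction alone.
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)

pred = torch.randn(4, 3, 8, 8)   # batch of 2 samples, doubled for CFG
out = cfg_combine(pred, guidance_scale=7.5)
print(out.shape)                 # torch.Size([2, 3, 8, 8])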
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-in...
import functools
import logging
import typing
import warnings
import ssl
from types import TracebackType
from urllib.parse import urljoin
from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from ._request_methods import RequestMethods
from .connection import ProxyConfig
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
    LocationValueError,
    MaxRetryError,
    ProxySchemeUnknown,
    URLSchemeUnknown,
)
from .response import BaseHTTPResponse
from .util.connection import _TYPE_SOCKET_OPTIONS
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import Url, parse_url
from typing import Literal
20,175
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ca_cert_data", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ca_cert_data: str | bytes | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ca_cert_data", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ca_cert_data: str | bytes | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None
key__proxy_config: ProxyConfig | None
3
2023-12-16 04:12:01+00:00
24k
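PoolKey in the record above makes every connection-relevant keyword argument part of a hashable tuple, so equivalent requests map to the same connection pool. A sketch of the normalization step that turns a kwargs dict into such a key; the field set is trimmed, and the real urllib3 normalizer handles many more fields, so treat this as an illustration of the idea rather than the library's API:

import typing

class MiniPoolKey(typing.NamedTuple):
    key_scheme: str
    key_host: str
    key_port: typing.Optional[int]
    key_headers: typing.Optional[frozenset]

def normalize_key(request_context: dict) -> MiniPoolKey:
    ctx = dict(request_context)
    ctx["scheme"] = ctx["scheme"].lower()    # scheme and host are case-insensitive
    ctx["host"] = ctx["host"].lower()
    if ctx.get("headers") is not None:       # dicts are unhashable; freeze them
        ctx["headers"] = frozenset(ctx["headers"].items())
    return MiniPoolKey(**{"key_" + k: v for k, v in ctx.items()})

a = normalize_key({"scheme": "HTTPS", "host": "Example.com", "port": 443,
                   "headers": {"x-a": "1"}})
b = normalize_key({"scheme": "https", "host": "example.com", "port": 443,
                   "headers": {"x-a": "1"}})
print(a == b, hash(a) == hash(b))  # True True -> both map to the same pool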
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,568
            while preserving the aspect ratio.
        interpolation: Type of interpolation to use when resizing images.
        video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only.

    Returns:
        Dictionary of the format { scene_num : [image_paths] }, where scene_num is the
        number of the scene in scene_list (starting from 1), and image_paths is a list of
        the paths to the newly saved/created images.

    Raises:
        ValueError: Raised if any arguments are invalid or out of range (e.g.
        if num_images is negative).
    """
    # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8.
    if video_manager is not None:
        logger.error('`video_manager` argument is deprecated, use `video` instead.')
        video = video_manager

    if not scene_list:
        return {}
    if num_images <= 0 or frame_margin < 0:
        raise ValueError()

    # TODO: Validate that encoder_param is within the proper range.
    # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
    imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param
                    ] if encoder_param is not None else []

    video.reset()

    # Setup flags and init progress bar if available.
    completed = True
    logger.info('Generating output images (%d per scene)...', num_images)
    progress_bar = None
    if show_progress:
        progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)

    filename_template = Template(image_name_template)

    scene_num_format = '%0'
    scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
    image_num_format = '%0'
    image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd'

    framerate = scene_list[0][0].framerate

    # TODO(v1.0): Split up into multiple sub-expressions so auto-formatter works correctly.
    timecode_list = [
        [
            FrameTimecode(int(f), fps=framerate) for f in [
                # middle frames
                a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1
                # first frame
                else min(a[0] + frame_margin, a[-1]) if j == 0
                # last frame
                else max(a[-1] - frame_margin, a[0])
                # for each evenly-split array of frames in the scene list
                for j, a in enumerate(np.array_split(r, num_images))
            ]
        ] for i, r in enumerate([
            # pad ranges to number of images
            r if 1 + r[-1] - r[0] >= num_images else list(r) + [r[-1]] * (num_images - len(r))
            # create range of frames in scene
            for r in (
                range(
                    start.get_frames(),
                    start.get_frames() + max(
                        1,  # guard against zero length scenes
                        end.get_frames() - start.get_frames()))
                # for each scene in scene list
                for start, end in scene_list)
        ])
    ]

    image_filenames = {i: [] for i in range(len(timecode_list))}
    aspect_ratio = video.aspect_ratio
    if abs(aspect_ratio - 1.0) < 0.01:
        aspect_ratio = None

    logger.debug('Writing images with template %s', filename_template.template)
    for i, scene_timecodes in enumerate(timecode_list):
        for j, image_timecode in enumerate(scene_timecodes):
            video.seek(image_timecode)
            frame_im = video.read()
            if frame_im is not None:
                # TODO: Allow NUM to be a valid suffix in addition to NUMBER.
                file_path = '%s.%s' % (filename_template.safe_substitute(
                    VIDEO_NAME=video.name,
                    SCENE_NUMBER=scene_num_format % (i + 1),
                    IMAGE_NUMBER=image_num_format % (j + 1),
                    FRAME_NUMBER=image_timecode.get_frames()), image_extension)
                image_filenames[i].append(file_path)
                # TODO(0.6.3): Combine this resize with the ones below.
                if aspect_ratio is not None:
                    frame_im = cv2.resize(
                        frame_im, (0, 0), fx=aspect_ratio, fy=1.0,
                        interpolation=interpolation.value)
                frame_height = frame_im.shape[0]
                frame_width = frame_im.shape[1]

                # Figure out what kind of resizing needs to be done
                if height or width:
                    if height and not width:
                        factor = height / float(frame_height)
                        width = int(factor * frame_width)
                    if width and not height:
                        factor = width / float(frame_width)
                        height = int(factor * frame_height)
                    assert height > 0 and width > 0
                    frame_im = cv2.resize(
                        frame_im, (width, height), interpolation=interpolation.value)
                elif scale:
                    frame_im = cv2.resize(
                        frame_im, (0, 0), fx=scale, fy=scale,
                        interpolation=interpolation.value)
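The resize branch above is easier to follow in isolation. Below is a minimal standalone sketch of the same selection logic (pixel-aspect correction first, then exact or derived height/width, then a uniform scale factor as a fallback); resize_saved_frame is a hypothetical helper name for illustration, not part of PySceneDetect.

from typing import Optional
import cv2
import numpy as np

def resize_saved_frame(frame_im: np.ndarray,
                       aspect_ratio: Optional[float] = None,
                       height: Optional[int] = None,
                       width: Optional[int] = None,
                       scale: Optional[float] = None,
                       interpolation: int = cv2.INTER_CUBIC) -> np.ndarray:
    # Correct non-square pixel aspect ratios by stretching horizontally.
    if aspect_ratio is not None:
        frame_im = cv2.resize(frame_im, (0, 0), fx=aspect_ratio, fy=1.0,
                              interpolation=interpolation)
    frame_height, frame_width = frame_im.shape[:2]
    if height or width:
        # Derive the missing dimension so the aspect ratio is preserved.
        if height and not width:
            width = int(height / float(frame_height) * frame_width)
        if width and not height:
            height = int(width / float(frame_width) * frame_height)
        assert height > 0 and width > 0
        return cv2.resize(frame_im, (width, height), interpolation=interpolation)
    if scale:
        return cv2.resize(frame_im, (0, 0), fx=scale, fy=scale,
                          interpolation=interpolation)
    return frame_im

# Example: downscale a dummy 1080p frame to 640 pixels wide.
thumb = resize_saved_frame(np.zeros((1080, 1920, 3), np.uint8), width=640)
print(thumb.shape)  # (360, 640, 3)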
# -*- coding: utf-8 -*-
#
#         PySceneDetect: Python-Based Video Scene Detector
#   -------------------------------------------------------------------
#     [  Site:   https://scenedetect.com                            ]
#     [  Docs:   https://scenedetect.com/docs/                      ]
#     [  Github: https://github.com/Breakthrough/PySceneDetect/     ]
#
# Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>.
# PySceneDetect is licensed under the BSD 3-Clause License; see the
# included LICENSE file, or visit one of the above pages for details.
#
"""``scenedetect.scene_manager`` Module

This module implements :class:`SceneManager`, which coordinates running a
:mod:`SceneDetector <scenedetect.detectors>` over the frames of a video
(:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate
thread to improve performance.

This module also contains other helper functions (e.g. :func:`save_images`) which can be
used to process the resulting scene list.

===============================================================
Usage
===============================================================

The following example shows basic usage of a :class:`SceneManager`:

.. code:: python

    from scenedetect import open_video, SceneManager, ContentDetector
    video = open_video(video_path)
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    # Detect all scenes in video from current position to end.
    scene_manager.detect_scenes(video)
    # `get_scene_list` returns a list of start/end timecode pairs
    # for each scene that was found.
    scenes = scene_manager.get_scene_list()

An optional callback can also be invoked on each detected scene, for example:

.. code:: python

    from scenedetect import open_video, SceneManager, ContentDetector

    # Callback to invoke on the first frame of every new scene detection.
    def on_new_scene(frame_img: numpy.ndarray, frame_num: int):
        print("New scene found at frame %d." % frame_num)

    video = open_video(test_video_file)
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    scene_manager.detect_scenes(video=video, callback=on_new_scene)

To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the
:class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of
`open_video`.

=======================================================================
Storing Per-Frame Statistics
=======================================================================

`SceneManager` can use an optional
:class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics
to disk:

.. code:: python

    from scenedetect import open_video, ContentDetector, SceneManager, StatsManager
    video = open_video(test_video_file)
    scene_manager = SceneManager(stats_manager=StatsManager())
    scene_manager.add_detector(ContentDetector())
    scene_manager.detect_scenes(video=video)
    scene_list = scene_manager.get_scene_list()
    print_scenes(scene_list=scene_list)
    # Save per-frame statistics to disk.
    scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH)

The statsfile can be used to find a better threshold for certain inputs, or perform
statistical analysis of the video.
"""

logger = logging.getLogger('pyscenedetect')

# TODO: This value can and should be tuned for performance improvements as much as possible,
# until accuracy falls, on a large enough dataset. This has yet to be done, but the current
# value doesn't seem to have caused any issues at least.
DEFAULT_MIN_WIDTH: int = 256
"""The default minimum width a frame will be downscaled to when calculating a downscale factor."""

MAX_FRAME_QUEUE_LENGTH: int = 4
"""Maximum number of decoded frames which can be buffered while waiting to be processed."""

PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress'
"""Template to use for progress bar."""


class Interpolation(Enum):
    """Interpolation method used for image resizing. Based on constants defined in OpenCV."""
    NEAREST = cv2.INTER_NEAREST
    """Nearest neighbor interpolation."""
    LINEAR = cv2.INTER_LINEAR
    """Bilinear interpolation."""
    CUBIC = cv2.INTER_CUBIC
    """Bicubic interpolation."""
    AREA = cv2.INTER_AREA
    """Pixel area relation resampling. Provides moire'-free downscaling."""
    LANCZOS4 = cv2.INTER_LANCZOS4
    """Lanczos interpolation over 8x8 neighborhood."""


def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int:
    """Get the optimal default downscale factor based on a video's resolution (currently only
    the width in pixels is considered).

    The resulting effective width of the video will be at least effective_width and strictly
    less than 2 * effective_width pixels (e.g. if effective_width is 200, resulting widths
    fall in the range [200, 400)).

    Arguments:
        frame_width: Actual width of the video frame in pixels.
        effective_width: Desired minimum width in pixels.

    Returns:
        int: The default downscale factor to use to achieve at least the target
        effective_width.
    """
    assert not (frame_width < 1 or effective_width < 1)
    if frame_width < effective_width:
        return 1
    return frame_width // effective_width


def get_scenes_from_cuts(
    cut_list: Iterable[FrameTimecode],
    start_pos: Union[int, FrameTimecode],
    end_pos: Union[int, FrameTimecode],
    base_timecode: Optional[FrameTimecode] = None,
) -> List[Tuple[FrameTimecode, FrameTimecode]]:
    """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of
    detected scene cuts/breaks.

    This function is called when using the :meth:`SceneManager.get_scene_list` method. The
    scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting
    that each scene is contiguous, starting from the first to last frame of the input. If
    `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`.

    Arguments:
        cut_list: List of FrameTimecode objects where scene cuts/breaks occur.
        start_pos: The start frame or FrameTimecode of the processed video. Used to generate
            the first scene's start time.
        end_pos: The end frame or FrameTimecode of the processed video. Used to generate the
            last scene's end time.
        base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only.

    Returns:
        List of tuples in the form (start_time, end_time), where both start_time and
        end_time are FrameTimecode objects representing the exact time/frame where each
        scene occupies based on the input cut_list.
    """
    # TODO(v0.7): Use the warnings module to turn this into a warning.
    if base_timecode is not None:
        logger.error('`base_timecode` argument is deprecated and has no effect.')
    # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode).
    scene_list = []
    if not cut_list:
        scene_list.append((start_pos, end_pos))
        return scene_list
    # Initialize last_cut to the first frame we processed, as it will be
    # the start timecode for the first scene in the list.
    last_cut = start_pos
    for cut in cut_list:
        scene_list.append((last_cut, cut))
        last_cut = cut
    # Last scene is from last cut to end of video.
    scene_list.append((last_cut, end_pos))

    return scene_list


def write_scene_list(output_csv_file: TextIO,
                     scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]],
                     include_cut_list: bool = True,
                     cut_list: Optional[Iterable[FrameTimecode]] = None) -> None:
    """Writes the given list of scenes to an output file handle in CSV format.

    Arguments:
        output_csv_file: Handle to open file in write mode.
        scene_list: List of pairs of FrameTimecodes denoting each scene's start/end
            FrameTimecode.
        include_cut_list: Bool indicating if the first row should include the timecodes where
            each scene starts. Should be set to False if RFC 4180 compliant CSV output is
            required.
        cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the
            frames in the video that need to be split to generate individual scenes). If not
            specified, the cut list is generated using the start times of each scene
            following the first one.
    """
    csv_writer = csv.writer(output_csv_file, lineterminator='\n')
    # If required, output the cutting list as the first row (i.e. before the header row).
    # Note the parentheses around the conditional expression: without them, the
    # "Timecode List:" label is dropped whenever `cut_list` is empty.
    if include_cut_list:
        csv_writer.writerow(
            ["Timecode List:"] +
            (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]))
    csv_writer.writerow([
        "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame",
        "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)",
        "Length (seconds)"
    ])
    for i, (start, end) in enumerate(scene_list):
        duration = end - start
        csv_writer.writerow([
            '%d' % (i + 1),
            '%d' % (start.get_frames() + 1),
            start.get_timecode(),
            '%.3f' % start.get_seconds(),
            '%d' % end.get_frames(),
            end.get_timecode(),
            '%.3f' % end.get_seconds(),
            '%d' % duration.get_frames(),
            duration.get_timecode(),
            '%.3f' % duration.get_seconds()
        ])


def write_scene_list_html(output_html_filename,
                          scene_list,
                          cut_list=None,
                          css=None,
                          css_class='mytable',
                          image_filenames=None,
                          image_width=None,
                          image_height=None):
    """Writes the given list of scenes to an output file handle in html format.

    Arguments:
        output_html_filename: filename of output html file
        scene_list: List of pairs of FrameTimecodes denoting each scene's start/end
            FrameTimecode.
        cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the
            frames in the video that need to be split to generate individual scenes). If not
            passed, the start times of each scene (besides the 0th scene) are used instead.
        css: String containing all the css information for the resulting html page.
        css_class: String containing the named css class
        image_filenames: dict where key i contains a list with n elements (filenames of
            the n saved images from that scene)
        image_width: Optional desired width of images in table in pixels
        image_height: Optional desired height of images in table in pixels
    """
    if not css:
        css = """
        table.mytable {
            font-family: times;
            font-size: 12px;
            color: #000000;
            border-width: 1px;
            border-color: #eeeeee;
            border-collapse: collapse;
            background-color: #ffffff;
            width: 100%;
            max-width: 550px;
            table-layout: fixed;
        }
        table.mytable th {
            border-width: 1px;
            padding: 8px;
            border-style: solid;
            border-color: #eeeeee;
            background-color: #e6eed6;
            color: #000000;
        }
        table.mytable td {
            border-width: 1px;
            padding: 8px;
            border-style: solid;
            border-color: #eeeeee;
        }
        #code {
            display: inline;
            font-family: courier;
            color: #3d9400;
        }
        #string {
            display: inline;
            font-weight: bold;
        }
        """

    # Output Timecode list
    timecode_table = SimpleTable(
        [["Timecode List:"] +
         (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])],
        css_class=css_class)

    # Output list of scenes
    header_row = [
        "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame",
        "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)",
        "Length (seconds)"
    ]
    for i, (start, end) in enumerate(scene_list):
        duration = end - start

        row = SimpleTableRow([
            '%d' % (i + 1),
            '%d' % (start.get_frames() + 1),
            start.get_timecode(),
            '%.3f' % start.get_seconds(),
            '%d' % end.get_frames(),
            end.get_timecode(),
            '%.3f' % end.get_seconds(),
            '%d' % duration.get_frames(),
            duration.get_timecode(),
            '%.3f' % duration.get_seconds()
        ])

        if image_filenames:
            for image in image_filenames[i]:
                row.add_cell(
                    SimpleTableCell(
                        SimpleTableImage(image, width=image_width, height=image_height)))

        if i == 0:
            scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class)
        else:
            scene_table.add_row(row=row)

    # Write html file
    page = HTMLPage()
    page.add_table(timecode_table)
    page.add_table(scene_table)
    page.css = css
    page.save(output_html_filename)


#
# TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list
# to a better spot, or just move them to scene_list.py.
#
def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]],
                video: VideoStream,
                num_images: int = 3,
                frame_margin: int = 1,
                image_extension: str = 'jpg',
                encoder_param: int = 95,
                image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER',
                output_dir: Optional[str] = None,
                show_progress: Optional[bool] = False,
                scale: Optional[float] = None,
                height: Optional[int] = None,
                width: Optional[int] = None,
                interpolation: Interpolation = Interpolation.CUBIC,
                video_manager=None) -> Dict[int, List[str]]:
    """Save a set number of images from each scene, given a list of scenes
    and the associated video/frame source.

    Arguments:
        scene_list: A list of scenes (pairs of FrameTimecode objects) returned
            from calling a SceneManager's detect_scenes() method.
        video: A VideoStream object corresponding to the scene list.
            Note that the video will be closed/re-opened and seeked through.
        num_images: Number of images to generate for each scene. Minimum is 1.
        frame_margin: Number of frames to pad each scene around the beginning
            and end (e.g. moves the first/last image into the scene by N frames).
            Can set to 0, but will result in some video files failing to extract
            the very last frame.
        image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp').
        encoder_param: Quality/compression efficiency, based on type of image:
            'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp.
            'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode.
        image_name_template: Template to use when creating the images on disk. Can
            use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image
            extension is applied automatically as per the argument image_extension.
        output_dir: Directory to output the images into. If not set, the output
            is created in the working directory.
        show_progress: If True, shows a progress bar if tqdm is installed.
        scale: Optional factor by which to rescale saved images. A scaling factor of 1 would
            not result in rescaling. A value < 1 results in a smaller saved image, while a
            value > 1 results in an image larger than the original. This value is ignored if
            either the height or width values are specified.
        height: Optional value for the height of the saved images. Specifying both the height
            and width will resize images to an exact size, regardless of aspect ratio.
            Specifying only height will rescale the image to that number of pixels in height
            while preserving the aspect ratio.
        width: Optional value for the width of the saved images. Specifying both the width
            and height will resize images to an exact size, regardless of aspect ratio.
            Specifying only width will rescale the image to that number of pixels wide
            while preserving the aspect ratio.
        interpolation: Type of interpolation to use when resizing images.
        video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only.

    Returns:
        Dictionary of the format { scene_num : [image_paths] }, where scene_num is the
        number of the scene in scene_list (starting from 1), and image_paths is a list of
        the paths to the newly saved/created images.

    Raises:
        ValueError: Raised if any arguments are invalid or out of range (e.g.
        if num_images is negative).
    """
    # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8.
    if video_manager is not None:
        logger.error('`video_manager` argument is deprecated, use `video` instead.')
        video = video_manager

    if not scene_list:
        return {}
    if num_images <= 0 or frame_margin < 0:
        raise ValueError()

    # TODO: Validate that encoder_param is within the proper range.
    # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
    imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param
                    ] if encoder_param is not None else []

    video.reset()

    # Setup flags and init progress bar if available.
    completed = True
    logger.info('Generating output images (%d per scene)...', num_images)
    progress_bar = None
    if show_progress:
        progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)

    filename_template = Template(image_name_template)

    scene_num_format = '%0'
    scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
    image_num_format = '%0'
    image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd'

    framerate = scene_list[0][0].framerate

    # TODO(v1.0): Split up into multiple sub-expressions so auto-formatter works correctly.
    timecode_list = [
        [
            FrameTimecode(int(f), fps=framerate) for f in [
                # middle frames
                a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1
                # first frame
                else min(a[0] + frame_margin, a[-1]) if j == 0
                # last frame
                else max(a[-1] - frame_margin, a[0])
                # for each evenly-split array of frames in the scene list
                for j, a in enumerate(np.array_split(r, num_images))
            ]
        ] for i, r in enumerate([
            # pad ranges to number of images
            r if 1 + r[-1] - r[0] >= num_images else list(r) + [r[-1]] * (num_images - len(r))
            # create range of frames in scene
            for r in (
                range(
                    start.get_frames(),
                    start.get_frames() + max(
                        1,  # guard against zero length scenes
                        end.get_frames() - start.get_frames()))
                # for each scene in scene list
                for start, end in scene_list)
        ])
    ]

    image_filenames = {i: [] for i in range(len(timecode_list))}
    aspect_ratio = video.aspect_ratio
    if abs(aspect_ratio - 1.0) < 0.01:
        aspect_ratio = None

    logger.debug('Writing images with template %s', filename_template.template)
    for i, scene_timecodes in enumerate(timecode_list):
        for j, image_timecode in enumerate(scene_timecodes):
            video.seek(image_timecode)
            frame_im = video.read()
            if frame_im is not None:
                # TODO: Allow NUM to be a valid suffix in addition to NUMBER.
                file_path = '%s.%s' % (filename_template.safe_substitute(
                    VIDEO_NAME=video.name,
                    SCENE_NUMBER=scene_num_format % (i + 1),
                    IMAGE_NUMBER=image_num_format % (j + 1),
                    FRAME_NUMBER=image_timecode.get_frames()), image_extension)
                image_filenames[i].append(file_path)
                # TODO(0.6.3): Combine this resize with the ones below.
                if aspect_ratio is not None:
                    frame_im = cv2.resize(
                        frame_im, (0, 0), fx=aspect_ratio, fy=1.0,
                        interpolation=interpolation.value)
                frame_height = frame_im.shape[0]
                frame_width = frame_im.shape[1]

                # Figure out what kind of resizing needs to be done
                if height or width:
                    if height and not width:
                        factor = height / float(frame_height)
                        width = int(factor * frame_width)
                    if width and not height:
                        factor = width / float(frame_width)
                        height = int(factor * frame_height)
                    assert height > 0 and width > 0
                    frame_im = cv2.resize(
                        frame_im, (width, height), interpolation=interpolation.value)
                elif scale:
                    frame_im = cv2.resize(
                        frame_im, (0, 0), fx=scale, fy=scale,
                        interpolation=interpolation.value)
cv2.imwrite(get_and_create_path(file_path, output_dir), frame_im, imwrite_param)
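The nested timecode_list comprehension packs the whole frame-selection rule into one expression. As a reading aid, here is a hedged, unrolled re-implementation of just that rule; select_frame_numbers is an illustrative name, not a library function, and it assumes contiguous frame ranges as produced above.

import numpy as np

def select_frame_numbers(start_frame: int, end_frame: int,
                         num_images: int = 3, frame_margin: int = 1) -> list:
    # Frames in the scene; matches range(start, start + max(1, end - start)) above.
    frames = list(range(start_frame, start_frame + max(1, end_frame - start_frame)))
    # Pad short scenes by repeating the last frame so every split is non-empty.
    if len(frames) < num_images:
        frames += [frames[-1]] * (num_images - len(frames))
    selected = []
    for j, part in enumerate(np.array_split(frames, num_images)):
        if (0 < j < num_images - 1) or num_images == 1:
            selected.append(int(part[len(part) // 2]))                   # middle frame
        elif j == 0:
            selected.append(int(min(part[0] + frame_margin, part[-1])))  # first frame
        else:
            selected.append(int(max(part[-1] - frame_margin, part[0])))  # last frame
    return selected

print(select_frame_numbers(100, 200))  # [101, 150, 198]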
5
2023-10-25 02:50:01+00:00
24k
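For a concrete feel for compute_downscale_factor from the module above, a short worked example of the factor arithmetic, re-inlined so it runs standalone (the docstring's bound follows from the integer division; the clamp to 1 reproduces the frame_width < effective_width branch):

# factor = frame_width // effective_width, clamped to a minimum of 1.
for frame_width in (160, 640, 1920, 3840):
    factor = max(1, frame_width // 256)
    print(frame_width, '->', factor, 'effective width', frame_width // factor)
# 160  -> 1  effective width 160
# 640  -> 2  effective width 320
# 1920 -> 7  effective width 274
# 3840 -> 15 effective width 256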
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n F...
from typing import List

from sentence_transformers import SentenceTransformer

from embedding_studio.core.config import settings
from embedding_studio.core.plugin import FineTuningMethod
from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import (
    AWSS3ClickstreamParser,
)
from embedding_studio.embeddings.data.clickstream.search_event import (
    DummyEventType,
    SearchResult,
)
from embedding_studio.embeddings.data.clickstream.splitter import (
    ClickstreamSessionsSplitter,
)
from embedding_studio.embeddings.data.clickstream.text_query_item import (
    TextQueryItem,
)
from embedding_studio.embeddings.data.clickstream.text_query_retriever import (
    TextQueryRetriever,
)
from embedding_studio.embeddings.data.loaders.s3.s3_loader import (
    AWSS3DataLoader,
)
from embedding_studio.embeddings.data.storages.producers.clip import (
    CLIPItemStorageProducer,
)
from embedding_studio.embeddings.data.utils.fields_normalizer import (
    DatasetFieldsNormalizer,
)
from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import (
    CosineProbMarginRankingLoss,
)
from embedding_studio.embeddings.models.text_to_image.clip import (
    TextToImageCLIPModel,
)
from embedding_studio.models.clickstream.sessions import SessionWithEvents
from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta
from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data
from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
    ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import (
    FineTuningSettings,
)
from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import (
    INITIAL_PARAMS,
)
from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
)
16,704
class DefaultFineTuningMethod(FineTuningMethod):
    meta = PluginMeta(
        name="Default Fine Tuning Method",
        version="0.0.1",
        description="A default fine-tuning plugin",
    )

    def __init__(self):
        # uncomment and pass your credentials to use your own s3 bucket
        # creds = {
        #     "role_arn": "arn:aws:iam::123456789012:role/some_data",
        #     "aws_access_key_id": "TESTACCESSKEIDTEST11",
        #     "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03",
        # }
        # self.data_loader = AWSS3DataLoader(**creds)

        # with empty creds, use anonymous session
        creds = {}
        self.data_loader = AWSS3DataLoader(**creds)

        self.retriever = TextQueryRetriever()
        self.parser = AWSS3ClickstreamParser(
            TextQueryItem, SearchResult, DummyEventType
        )
        self.splitter = ClickstreamSessionsSplitter()
        self.normalizer = DatasetFieldsNormalizer("item", "item_id")
        self.storage_producer = CLIPItemStorageProducer(self.normalizer)

        self.accumulators = [
            MetricsAccumulator("train_loss", True, True, True, True),
            MetricsAccumulator(
                "train_not_irrelevant_dist_shift", True, True, True, True
            ),
            MetricsAccumulator(
                "train_irrelevant_dist_shift", True, True, True, True
            ),
            MetricsAccumulator("test_loss"),
            MetricsAccumulator("test_not_irrelevant_dist_shift"),
            MetricsAccumulator("test_irrelevant_dist_shift"),
        ]

        self.manager = ExperimentsManager(
tracking_uri=settings.MLFLOW_TRACKING_URI,
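The constructor above toggles between anonymous and credentialed S3 access by passing an empty or populated creds dict. A small sketch of that pattern, assuming environment variables as the credential source (the variable names and the commented-out loader call are illustrative assumptions, not embedding_studio API):

import os

# Fall back to an anonymous session when no access key is configured.
ANONYMOUS = not os.environ.get("AWS_ACCESS_KEY_ID")

creds = {} if ANONYMOUS else {
    "role_arn": os.environ.get("AWS_ROLE_ARN", ""),
    "aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID", ""),
    "aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY", ""),
}
# data_loader = AWSS3DataLoader(**creds)  # empty creds -> anonymous S3 session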
0
2023-10-31 00:33:13+00:00
24k
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n...
import copy
import time

import numpy as np
import jax

import minimax.envs as envs
import minimax.models as models
import minimax.agents as agents

from functools import partial
from collections import defaultdict

from jax.sharding import Mesh, PartitionSpec as P
from jax.experimental import mesh_utils
from jax.experimental.shard_map import shard_map

from .eval_runner import EvalRunner
from .dr_runner import DRRunner
from .paired_runner import PAIREDRunner
from .plr_runner import PLRRunner

from minimax.util.rl import UEDScore, PopPLRManager
16,378
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo(
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo(
runner_cls=PLRRunner,
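The record above uses a small registry of RunnerInfo entries keyed by method name. A self-contained sketch of that lookup pattern, with stand-in classes in place of the real runners (whether a given method counts as UED is an assumption here, not taken from the source):

class RunnerInfo:
    def __init__(self, runner_cls, is_ued=False):
        self.runner_cls = runner_cls
        self.is_ued = is_ued

class DRRunner: ...
class PAIREDRunner: ...

RUNNER_INFO = {
    'dr': RunnerInfo(runner_cls=DRRunner),
    'paired': RunnerInfo(runner_cls=PAIREDRunner, is_ued=True),
}

def make_runner(name: str, **kwargs):
    # Resolve the runner class and its UED flag from the config string.
    info = RUNNER_INFO[name]  # KeyError for unknown runner names
    return info.runner_cls(**kwargs), info.is_ued

runner, is_ued = make_runner('dr')
print(type(runner).__name__, is_ued)  # DRRunner False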
3
2023-10-28 12:12:01+00:00
24k
nv-tlabs/vid2player3d
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(\n self, template_dir=\"/hdd/zen/dev/copycat/Copycat/assets/bigfoot_template_v1.pkl\"\n ):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1...
import os
import sys
import time
import argparse
import torch
import os.path as osp
import mujoco_py
import numpy as np
import math
import uuid
import atexit
import shutil

from copy import deepcopy
from lxml.etree import XMLParser, parse, Element, SubElement
from lxml import etree
from io import BytesIO
from scipy.spatial import ConvexHull
from stl import mesh
from mujoco_py import load_model_from_path, MjSim, MjViewer

from uhc.khrylib.mocap.skeleton_local import Skeleton
from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from uhc.utils.geom import quadric_mesh_decimation
from uhc.utils.flags import flags
15,111
# ############################## # joint_range["L_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Thorax"][1] = np.array([-np.pi , np.pi]) # joint_range["L_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Thorax"][1] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["L_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Shoulder"][1] = np.array([-np.pi , np.pi / 2]) # joint_range["L_Shoulder"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Shoulder"][1] = np.array([-np.pi/2, np.pi]) # joint_range["R_Shoulder"][2] = np.array([-np.pi, np.pi]) # ############################## # joint_range["L_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["L_Hip"][2] = np.array([-np.pi / 3, np.pi /2]) # joint_range["R_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["R_Hip"][2] = np.array([-np.pi / 2, np.pi / 3]) # joint_range["L_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["L_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["R_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["L_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) self.height = np.max(verts[:, 1]) - np.min(verts[:, 1]) size_dict = {} if ( len(self.get_params(get_name=True)) > 1 and not params is None ): # ZL: dank code, very dank code self.set_params(params) size_dict = self.get_size() size_dict = self.enforce_length_size(size_dict) # Gear based size # gear_dict = self.get_gear() # for k, v in size_dict.items(): # for idx, suffix in enumerate(["_x", "_y", "_z"]): # if k + suffix in gear_dict: # size_dict[k][idx] *= gear_dict[k + suffix] self.hull_dict = get_joint_geometries( verts, joints, skin_weights, joint_names, scale_dict=size_dict, geom_dir=f"{self.geom_dir}/geom", ) self.skeleton.load_from_offsets( joint_offsets, joint_parents, joint_axes, joint_dofs, joint_range, sites={}, scale=1, equalities={}, exclude_contacts = [ ["Chest", "L_Shoulder"], ["Chest", "R_Shoulder"], ["Chest", "R_Thorax"], ["Chest", "L_Thorax"], ['L_Hip', 'Pelvis'], ['R_Hip', 'Pelvis'], ['Torso', 'Pelvis'], ['L_Knee', 'L_Hip'], ['R_Knee', 'R_Hip'], ['Spine', 'Torso'], ['L_Ankle', 'L_Knee'], ['R_Ankle', 'R_Knee'], ['Chest', 'Spine'], ['L_Toe', 'L_Ankle'], ['R_Toe', 'R_Ankle'], ['Neck', 'Chest'], ['L_Thorax', 'Chest'], ['R_Thorax', 'Chest'], ['Head', 'Neck'], ['L_Shoulder', 'L_Thorax'], ['R_Shoulder', 'R_Thorax'], ['L_Elbow', 
'L_Shoulder'], ['R_Elbow', 'R_Shoulder'], ['L_Wrist', 'L_Elbow'], ['R_Wrist', 'R_Elbow'], ['L_Hand', 'L_Wrist'], ['R_Hand', 'R_Wrist'] ], collision_groups=contype, conaffinity=conaffinity, simple_geom=False, ) else:
sys.path.append(os.getcwd()) def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[ None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix = None, verbose=False, min_num_vert = 50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(vert) norm_verts = vert - smpl_jts[jind] norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": vert, "hull": hull, } # print(jname, hull.simplices.shape[0]) center = vert[hull.vertices].mean(axis=0) jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = vert[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert) quadric_mesh_decimation(fname, reduction_rate, verbose=verbose) return hull_dict class Joint: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib["name"] self.type = node.attrib["type"] if "type" in node.attrib else "free" if self.type == "hinge": self.range = np.deg2rad(parse_vec(node.attrib.get("range", "-360 360"))) actu_node = ( body.tree.getroot().find("actuator").find(f'motor[@joint="{self.name}"]') ) if actu_node is not None: self.actuator = Actuator(actu_node, self) else: self.actuator = None self.parse_param_specs() self.param_inited = False # tunable parameters self.pos = parse_vec("0 0 0") if self.type == "hinge": self.axis = vec_to_polar(parse_vec(node.attrib["axis"])) if self.local_coord: self.pos += body.pos self.damping = ( parse_vec(node.attrib["damping"]) if "damping" in node.attrib else np.array([0]) ) self.stiffness = ( parse_vec(node.attrib["stiffness"]) if "stiffness" in node.attrib else np.array([0]) ) self.armature = ( parse_vec(node.attrib["armature"]) if "armature" in node.attrib else np.array([0.01]) ) self.frictionloss = ( parse_vec(node.attrib["frictionloss"]) if "frictionloss" in 
node.attrib else np.array([0]) ) # import ipdb; ipdb.set_trace() # assert np.all(self.pos == body.pos) def __repr__(self): return "joint_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("joint_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self, rename=False, index=0): pos = self.pos - self.body.pos if self.local_coord else self.pos if rename: self.name = self.body.name + "_joint_" + str(index) self.node.attrib["name"] = self.name if self.type == "hinge": axis_vec = polar_to_vec(self.axis) self.node.attrib["axis"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in axis_vec] ) self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) self.node.attrib["damping"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.damping] ) self.node.attrib["stiffness"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.stiffness] ) self.node.attrib["armature"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.armature] ) elif self.type == "free": pass if self.actuator is not None: self.actuator.sync_node() # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = " ".join( # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.frictionloss] # ) # if np.sum([self.name.startswith(i) for i in ["L_Knee", "R_Knee", "L_Ankle", "R_Ankle", "L_Toe", "R_Toe"]]): # self.node.attrib["frictionloss"] = "500" # self.node.attrib["stiffness"] = "5" # self.node.attrib["damping"] = "5" # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = "5000" def get_params(self, param_list, get_name=False, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": if get_name: param_list += ["axis_theta", "axis_phi"] else: axis = normalize_range( self.axis, np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]), ) param_list.append(axis) elif pad_zeros: param_list.append(np.zeros(2)) if self.actuator is not None: self.actuator.get_params(param_list, get_name) elif pad_zeros: param_list.append( np.zeros(3 if self.type == "free" else 1) ) # ZL currently a workaround for supporting 3D joints if "damping" in self.param_specs: if get_name: param_list.append("damping") else: if not self.param_inited and self.param_specs["damping"].get( "rel", False ): self.param_specs["damping"]["lb"] += self.damping self.param_specs["damping"]["ub"] += self.damping self.param_specs["damping"]["lb"] = max( self.param_specs["damping"]["lb"], self.param_specs["damping"].get("min", -np.inf), ) self.param_specs["damping"]["ub"] = min( self.param_specs["damping"]["ub"], self.param_specs["damping"].get("max", np.inf), ) damping = normalize_range( self.damping, self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) param_list.append(damping.flatten()) if "armature" in self.param_specs: if get_name: param_list.append("armature") else: if not self.param_inited and self.param_specs["armature"].get( "rel", False ): self.param_specs["armature"]["lb"] += self.armature self.param_specs["armature"]["ub"] += self.armature self.param_specs["armature"]["lb"] = max( self.param_specs["armature"]["lb"], self.param_specs["armature"].get("min", -np.inf), ) self.param_specs["armature"]["ub"] = min( self.param_specs["armature"]["ub"], self.param_specs["armature"].get("max", np.inf), ) armature = normalize_range( self.armature, 
self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) param_list.append(armature.flatten()) if "stiffness" in self.param_specs: if get_name: param_list.append("stiffness") else: if not self.param_inited and self.param_specs["stiffness"].get( "rel", False ): self.param_specs["stiffness"]["lb"] += self.stiffness self.param_specs["stiffness"]["ub"] += self.stiffness self.param_specs["stiffness"]["lb"] = max( self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"].get("min", -np.inf), ) self.param_specs["stiffness"]["ub"] = min( self.param_specs["stiffness"]["ub"], self.param_specs["stiffness"].get("max", np.inf), ) stiffness = normalize_range( self.stiffness, self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) param_list.append(stiffness.flatten()) if "frictionloss" in self.param_specs: if get_name: param_list.append("frictionloss") else: if not self.param_inited and self.param_specs["frictionloss"].get( "rel", False ): self.param_specs["frictionloss"]["lb"] += self.frictionloss self.param_specs["frictionloss"]["ub"] += self.frictionloss self.param_specs["frictionloss"]["lb"] = max( self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"].get("min", -np.inf), ) self.param_specs["frictionloss"]["ub"] = min( self.param_specs["frictionloss"]["ub"], self.param_specs["frictionloss"].get("max", np.inf), ) frictionloss = normalize_range( self.frictionloss, self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) param_list.append(frictionloss.flatten()) if not get_name: self.param_inited = True # import ipdb; ipdb.set_trace() def set_params(self, params, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": self.axis = denormalize_range( params[:2], np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]) ) params = params[2:] elif pad_zeros: params = params[2:] if self.actuator is not None: params = self.actuator.set_params(params) elif pad_zeros: params = params[1:] # Order of this matters!!! 
Should always be damping, aramature, stiffness (the order they are read) if "damping" in self.param_specs: self.damping = denormalize_range( params[[0]], self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) params = params[1:] if "armature" in self.param_specs: self.armature = denormalize_range( params[[0]], self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) params = params[1:] if "stiffness" in self.param_specs: self.stiffness = denormalize_range( params[[0]], self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) params = params[1:] if "frictionloss" in self.param_specs: self.frictionloss = denormalize_range( params[[0]], self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) params = params[1:] return params class Geom: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib.get("name", "") self.type = node.attrib["type"] self.density = ( parse_vec(node.attrib["density"]) / 1000 if "density" in node.attrib else np.array([1]) ) self.parse_param_specs() self.param_inited = False # tunable parameters # self.size = ( # parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([0]) # ) self.size = ( parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([1, 1, 1]) ) if self.type == "box": self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) self.pos_delta = np.array([0, 0, 0]) self.rot = parse_vec(node.attrib["quat"]) elif self.type == "sphere": self.pos_delta = np.array([0, 0, 0]) self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) elif self.type == "capsule": self.start, self.end = parse_fromto(node.attrib["fromto"]) elif self.type == "mesh": self.start, self.end = body.pos.copy(), body.pos.copy() if self.local_coord: self.start += body.pos self.end += body.pos if body.bone_start is None: self.bone_start = self.start.copy() body.bone_start = self.bone_start.copy() else: self.bone_start = body.bone_start.copy() self.ext_start = np.linalg.norm( self.bone_start - self.start ) ## Geom extension from bone start def __repr__(self): return "geom_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("geom_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) def update_start(self): if self.type == "capsule": vec = self.bone_start - self.end self.start = self.bone_start + vec * (self.ext_start / np.linalg.norm(vec)) def sync_node(self): # self.node.attrib['name'] = self.name self.node.attrib.pop("name", None) if not self.size is None: self.node.attrib["size"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size] ) self.node.attrib["density"] = " ".join( [f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density] ) # if self.type == "capsule": # start = self.start - self.body.pos if self.local_coord else self.start # end = self.end - self.body.pos if self.local_coord else self.end # self.node.attrib["fromto"] = " ".join( # [ # f"{x:.6f}".rstrip("0").rstrip(".") # for x in np.concatenate([start, end]) # ] # ) # elif self.type == "box" or self.type == "sphere": # 
# self.node.attrib["pos"] = " ".join( # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta] # # ) # import ipdb; ipdb.set_trace() # pass def get_params(self, param_list, get_name=False, pad_zeros=False): if "size" in self.param_specs: if get_name: param_list.append("size") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if not self.param_inited and self.param_specs["size"].get( "rel", False ): self.param_specs["size"]["lb"] += self.size self.param_specs["size"]["ub"] += self.size self.param_specs["size"]["lb"] = max( self.param_specs["size"]["lb"], self.param_specs["size"].get("min", -np.inf), ) self.param_specs["size"]["ub"] = min( self.param_specs["size"]["ub"], self.param_specs["size"].get("max", np.inf), ) size = normalize_range( self.size, self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) param_list.append(size.flatten()) if pad_zeros and self.type == "capsule": param_list.append( np.zeros(2) ) # capsule has needs to be 3 for GNN elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "ext_start" in self.param_specs: if get_name: param_list.append("ext_start") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" ): if not self.param_inited and self.param_specs["ext_start"].get( "rel", False ): self.param_specs["ext_start"]["lb"] += self.ext_start self.param_specs["ext_start"]["ub"] += self.ext_start self.param_specs["ext_start"]["lb"] = max( self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"].get("min", -np.inf), ) self.param_specs["ext_start"]["ub"] = min( self.param_specs["ext_start"]["ub"], self.param_specs["ext_start"].get("max", np.inf), ) ext_start = normalize_range( self.ext_start, self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) param_list.append(ext_start.flatten()) elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "density" in self.param_specs: if get_name: param_list.append("density") else: if not self.param_inited and self.param_specs["density"].get( "rel", False ): self.param_specs["density"]["lb"] += self.density self.param_specs["density"]["ub"] += self.density self.param_specs["density"]["lb"] = max( self.param_specs["density"]["lb"], self.param_specs["density"].get("min", -np.inf), ) self.param_specs["density"]["ub"] = min( self.param_specs["density"]["ub"], self.param_specs["density"].get("max", np.inf), ) density = normalize_range( self.density, self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) param_list.append(density.flatten()) # if pad_zeros: # param_list.append(np.zeros(self.density.shape)) if "pos_delta" in self.param_specs: if get_name: param_list.append("pos_delta") else: if self.type == "box" or self.type == "sphere": if not self.param_inited and self.param_specs["pos_delta"].get( "rel", False ): self.param_specs["pos_delta"]["lb"] += self.density self.param_specs["pos_delta"]["ub"] += self.density self.param_specs["pos_delta"]["lb"] = max( self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"].get("min", -np.inf), ) self.param_specs["pos_delta"]["ub"] = min( self.param_specs["pos_delta"]["ub"], self.param_specs["pos_delta"].get("max", np.inf), ) pos_delta = normalize_range( self.pos_delta, self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) param_list.append(pos_delta.flatten()) elif pad_zeros: param_list.append(np.zeros(3)) if not get_name: self.param_inited = True def set_params(self, params, pad_zeros=False): 
if "size" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if len(self.size) == 1: self.size = denormalize_range( params[[0]], self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[1:] elif len(self.size) == 3: self.size = denormalize_range( np.array(params[:3]), self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[3:] elif pad_zeros: params = params[1:] if "ext_start" in self.param_specs: if self.type == "capsule" or self.type == "box" or self.type == "sphere": self.ext_start = denormalize_range( params[[0]], self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "density" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): self.density = denormalize_range( params[[0]], self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "pos_delta" in self.param_specs: if self.type == "box" or self.type == "sphere": self.pos_delta = denormalize_range( np.array(params[:3]), self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) params = params[3:] elif pad_zeros: params = params[3:] return params class Actuator: def __init__(self, node, joint): self.node = node self.joint = joint self.cfg = joint.cfg self.joint_name = node.attrib["joint"] self.name = self.joint_name self.parse_param_specs() self.param_inited = False # tunable parameters self.gear = float(node.attrib["gear"]) def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("actuator_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self): self.node.attrib["gear"] = f"{self.gear:.6f}".rstrip("0").rstrip(".") self.name = self.joint.name self.node.attrib["name"] = self.name self.node.attrib["joint"] = self.joint.name def get_params(self, param_list, get_name=False): if "gear" in self.param_specs: if get_name: param_list.append("gear") else: if not self.param_inited and self.param_specs["gear"].get("rel", False): self.param_specs["gear"]["lb"] += self.gear self.param_specs["gear"]["ub"] += self.gear self.param_specs["gear"]["lb"] = max( self.param_specs["gear"]["lb"], self.param_specs["gear"].get("min", -np.inf), ) self.param_specs["gear"]["ub"] = min( self.param_specs["gear"]["ub"], self.param_specs["gear"].get("max", np.inf), ) gear = normalize_range( self.gear, self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) param_list.append(np.array([gear])) if not get_name: self.param_inited = True def set_params(self, params): if "gear" in self.param_specs: self.gear = denormalize_range( params[0].item(), self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) params = params[1:] return params class Body: def __init__(self, node, parent_body, robot, cfg, new_body=False): self.node = node self.parent = parent_body self.new_body = new_body if parent_body is not None: parent_body.child.append(self) parent_body.cind += 1 self.depth = parent_body.depth + 1 else: self.depth = 0 self.robot = robot self.cfg = cfg self.tree = robot.tree self.local_coord = robot.local_coord self.name = ( node.attrib["name"] if "name" in node.attrib else self.parent.name + 
f"_child{len(self.parent.child)}" ) self.child = [] self.cind = 0 self.pos = parse_vec(node.attrib["pos"]) if self.local_coord and parent_body is not None: self.pos += parent_body.pos if cfg.get("init_root_from_geom", False): self.bone_start = None if parent_body is None else self.pos.copy() else: self.bone_start = self.pos.copy() self.joints = [Joint(x, self) for x in node.findall('joint[@type="hinge"]')] + \ [Joint(x, self) for x in node.findall('joint[@type="free"]')] + \ [Joint(x, self) for x in node.findall('freejoint')] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] supported_geoms = self.cfg.get("supported_geoms", ["capsule", "box"]) self.geoms = [ Geom(x, self) for geom_type in supported_geoms for x in node.findall(f'geom[@type="{geom_type}"]') ] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] + [Geom(x, self) for x in node.findall('geom[@type="sphere"]')] + [Geom(x, self) for x in node.findall('geom[@type="box"]')] self.parse_param_specs() self.param_inited = False # parameters self.bone_end = None self.bone_offset = None def __repr__(self): return "body_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("body_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) if name == "bone_ang": specs["lb"] = np.deg2rad(specs["lb"]) specs["ub"] = np.deg2rad(specs["ub"]) def reindex(self): if self.parent is None: self.index = "0" else: ind = self.parent.child.index(self) + 1 pname = "" if self.parent.index == "0" else self.parent.index self.index = str(ind) + pname if self.new_body: self.name = self.index def init(self): if len(self.child) > 0: bone_ends = [x.bone_start for x in self.child] else: bone_ends = [x.end for x in self.geoms] if len(bone_ends) > 0: self.bone_end = np.mean(np.stack(bone_ends), axis=0) self.bone_offset = self.bone_end - self.bone_start def get_actuator_name(self): for joint in self.joints: if joint.actuator is not None: return joint.actuator.name def get_joint_range(self): assert len(self.joints) == 1 return self.joints[0].range def sync_node(self): pos = ( self.pos - self.parent.pos if self.local_coord and self.parent is not None else self.pos ) self.node.attrib["name"] = self.name self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) for idx, joint in enumerate(self.joints): joint.sync_node(rename=self.new_body, index=idx) for geom in self.geoms: geom.sync_node() def sync_geom(self): for geom in self.geoms: geom.bone_start = self.bone_start.copy() # geom.end = self.bone_end.copy() # geom.update_start() def sync_joint(self): if self.parent is not None: for joint in self.joints: joint.pos = self.pos.copy() def rebuild(self): if self.parent is not None: # self.bone_start = self.parent.bone_end.copy() self.pos = self.bone_start.copy() if self.bone_offset is not None: self.bone_end = self.bone_start + self.bone_offset if self.parent is None and self.cfg.get("no_root_offset", False): self.bone_end = self.bone_start self.sync_geom() self.sync_joint() def get_params( self, param_list, get_name=False, pad_zeros=False, demap_params=False ): if self.bone_offset is not None and "offset" in self.param_specs: if get_name: if self.param_specs["offset"]["type"] == "xz": param_list += ["offset_x", "offset_z"] elif self.param_specs["offset"]["type"] == "xy": param_list += 
["offset_x", "offset_y"] else: param_list += ["offset_x", "offset_y", "offset_z"] else: if self.param_specs["offset"]["type"] == "xz": offset = self.bone_offset[[0, 2]] elif self.param_specs["offset"]["type"] == "xy": offset = self.bone_offset[[0, 1]] else: offset = self.bone_offset if not self.param_inited and self.param_specs["offset"].get( "rel", False ): self.param_specs["offset"]["lb"] += offset self.param_specs["offset"]["ub"] += offset self.param_specs["offset"]["lb"] = np.maximum( self.param_specs["offset"]["lb"], self.param_specs["offset"].get( "min", np.full_like(offset, -np.inf) ), ) self.param_specs["offset"]["ub"] = np.minimum( self.param_specs["offset"]["ub"], self.param_specs["offset"].get( "max", np.full_like(offset, np.inf) ), ) offset = normalize_range( offset, self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) param_list.append(offset.flatten()) if self.bone_offset is not None and "bone_len" in self.param_specs: if get_name: param_list += ["bone_len"] else: bone_len = np.linalg.norm(self.bone_offset) if not self.param_inited and self.param_specs["bone_len"].get( "rel", False ): self.param_specs["bone_len"]["lb"] += bone_len self.param_specs["bone_len"]["ub"] += bone_len self.param_specs["bone_len"]["lb"] = max( self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"].get("min", -np.inf), ) self.param_specs["bone_len"]["ub"] = min( self.param_specs["bone_len"]["ub"], self.param_specs["bone_len"].get("max", np.inf), ) bone_len = normalize_range( bone_len, self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) param_list.append(np.array([bone_len])) if self.bone_offset is not None and "bone_ang" in self.param_specs: if get_name: param_list += ["bone_ang"] else: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if not self.param_inited and self.param_specs["bone_ang"].get( "rel", False ): self.param_specs["bone_ang"]["lb"] += bone_ang self.param_specs["bone_ang"]["ub"] += bone_ang self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and 
"bone_len" in self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array( [bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang)] ) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl", model_xml_path=None, masterfoot=False, create_default_skeleton=False, clean_up=False): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg if model_xml_path is not None: self.set_model_xml_path(model_xml_path) else: self.model_xml_path = None self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.rel_joint_lm = cfg.get( "rel_joint_lm", True ) # Rolling this out worldwide!! self.masterfoot = masterfoot self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral", create_transl=False) self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male", create_transl=False) self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female", create_transl=False) elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser( model_path=data_dir, gender="male", use_pca=False, create_transl=False ) self.smpl_parser_f = SMPLH_Parser( model_path=data_dir, gender="female", use_pca=False, create_transl=False ) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser( model_path=data_dir, gender="male", use_pca=False, create_transl=False ) self.smpl_parser_f = SMPLX_Parser( model_path=data_dir, gender="female", use_pca=False, create_transl=False ) if create_default_skeleton: self.load_from_skeleton() if clean_up: atexit.register(self.clean_up) def set_model_xml_path(self, model_xml_path): self.model_xml_path = model_xml_path self.model_dir = osp.dirname(model_xml_path) self.geom_dir = f'{self.model_dir}/mesh/{uuid.uuid4()}' os.makedirs(self.model_dir, exist_ok=True) def clean_up(self): if os.path.exists(self.model_xml_path): os.remove(self.model_xml_path) if osp.isdir(self.geom_dir): shutil.rmtree(self.geom_dir, ignore_errors=True) def get_joint_vertices(self, pose_aa, th_betas=None, th_trans=None, gender=[0]): if gender[0] == 0: smpl_parser = self.smpl_parser_n elif gender[0] == 1: smpl_parser = self.smpl_parser_m elif gender[0] == 2: smpl_parser = self.smpl_parser_f else: 
print(gender) raise Exception("Gender Not Supported!!") vertices, joints = smpl_parser.get_joints_verts( pose=pose_aa, th_betas=th_betas, th_trans=th_trans ) return vertices, joints def load_from_skeleton( self, betas=None, scale=None, v_template=None, gender=[0], objs_info=None, obj_pose=None, params=None, model_xml_path=None, ): if model_xml_path is not None: self.set_model_xml_path(model_xml_path) self.tree = None # xml tree if gender[0] == 0: self.smpl_parser = smpl_parser = self.smpl_parser_n elif gender[0] == 1: self.smpl_parser = smpl_parser = self.smpl_parser_m elif gender[0] == 2: self.smpl_parser = smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") if betas is None and self.beta is None: betas = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) else: if len(betas.shape) == 1: betas = betas[None, :] if params is None: self.beta = betas if not betas is None else self.beta else: # If params is not none, we need to set the beta first betas = self.map_params(betas) self.beta = torch.from_numpy( denormalize_range( betas.numpy().squeeze(), self.param_specs["beta"]["lb"], self.param_specs["beta"]["ub"], )[ None, ] ) if flags.debug: print(self.beta) ## Clear up beta for smpl and smplh if self.smpl_model == "smpl" and self.beta.shape[1] == 16: self.beta = self.beta[:, :10] # print(f"Incorrect shape size for {self.model}!!!") elif self.smpl_model == "smplh" and self.beta.shape[1] == 10: self.beta = torch.hstack([self.beta, torch.zeros((1, 6)).float()]) # print(f"Incorrect shape size for {self.model}!!!") if self.mesh: rel_geom_dir = os.path.relpath(self.geom_dir, self.model_dir) self.skeleton = SkeletonMesh(self.geom_dir, rel_geom_dir) zero_pose = torch.zeros((1,72)) ( verts, joints, skin_weights, joint_names, joint_offsets, joint_parents, joint_axes, joint_dofs, joint_range, contype, conaffinity, ) = (smpl_parser.get_mesh_offsets( zero_pose=zero_pose, betas=self.beta, flatfoot=self.flatfoot, scale=scale) if self.smpl_model != "smplx" else smpl_parser.get_mesh_offsets(v_template=v_template)) # if self.rel_joint_lm: # # if False: # joint_range["Head"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["Head"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["Head"][2] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Chest"][0] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Chest"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Chest"][2] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Spine"][0] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Spine"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Spine"][2] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Torso"][0] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Torso"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["Torso"][2] = np.array([-np.pi / 3, np.pi / 3]) # ############################## # joint_range["L_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Thorax"][1] = np.array([-np.pi , np.pi]) # joint_range["L_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Thorax"][1] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["L_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Shoulder"][1] = np.array([-np.pi , np.pi / 2]) # joint_range["L_Shoulder"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Shoulder"][1] = 
np.array([-np.pi/2, np.pi]) # joint_range["R_Shoulder"][2] = np.array([-np.pi, np.pi]) # ############################## # joint_range["L_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["L_Hip"][2] = np.array([-np.pi / 3, np.pi /2]) # joint_range["R_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["R_Hip"][2] = np.array([-np.pi / 2, np.pi / 3]) # joint_range["L_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["L_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["R_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["L_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) self.height = np.max(verts[:, 1]) - np.min(verts[:, 1]) size_dict = {} if ( len(self.get_params(get_name=True)) > 1 and not params is None ): # ZL: dank code, very dank code self.set_params(params) size_dict = self.get_size() size_dict = self.enforce_length_size(size_dict) # Gear based size # gear_dict = self.get_gear() # for k, v in size_dict.items(): # for idx, suffix in enumerate(["_x", "_y", "_z"]): # if k + suffix in gear_dict: # size_dict[k][idx] *= gear_dict[k + suffix] self.hull_dict = get_joint_geometries( verts, joints, skin_weights, joint_names, scale_dict=size_dict, geom_dir=f"{self.geom_dir}/geom", ) self.skeleton.load_from_offsets( joint_offsets, joint_parents, joint_axes, joint_dofs, joint_range, sites={}, scale=1, equalities={}, exclude_contacts = [ ["Chest", "L_Shoulder"], ["Chest", "R_Shoulder"], ["Chest", "R_Thorax"], ["Chest", "L_Thorax"], ['L_Hip', 'Pelvis'], ['R_Hip', 'Pelvis'], ['Torso', 'Pelvis'], ['L_Knee', 'L_Hip'], ['R_Knee', 'R_Hip'], ['Spine', 'Torso'], ['L_Ankle', 'L_Knee'], ['R_Ankle', 'R_Knee'], ['Chest', 'Spine'], ['L_Toe', 'L_Ankle'], ['R_Toe', 'R_Ankle'], ['Neck', 'Chest'], ['L_Thorax', 'Chest'], ['R_Thorax', 'Chest'], ['Head', 'Neck'], ['L_Shoulder', 'L_Thorax'], ['R_Shoulder', 'R_Thorax'], ['L_Elbow', 'L_Shoulder'], ['R_Elbow', 'R_Shoulder'], ['L_Wrist', 'L_Elbow'], ['R_Wrist', 'R_Elbow'], ['L_Hand', 'L_Wrist'], ['R_Hand', 'R_Wrist'] ], collision_groups=contype, conaffinity=conaffinity, simple_geom=False, ) else:
self.skeleton = Skeleton()
1
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
nerfstudio/models/semantic_nerfw.py
[ { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions...
from dataclasses import dataclass, field from typing import Dict, List, Tuple, Type from torch.nn import Parameter from torchmetrics import PeakSignalNoiseRatio from torchmetrics.functional import structural_similarity_index_measure from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from nerfstudio.cameras.rays import RayBundle from nerfstudio.data.dataparsers.base_dataparser import Semantics from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.field_components.field_heads import FieldHeadNames from nerfstudio.field_components.spatial_distortions import SceneContraction from nerfstudio.fields.density_fields import HashMLPDensityField from nerfstudio.fields.nerfacto_field import TCNNNerfactoField from nerfstudio.model_components.losses import MSELoss, distortion_loss, interlevel_loss from nerfstudio.model_components.ray_samplers import ProposalNetworkSampler from nerfstudio.model_components.renderers import ( AccumulationRenderer, DepthRenderer, RGBRenderer, SemanticRenderer, UncertaintyRenderer, ) from nerfstudio.model_components.scene_colliders import NearFarCollider from nerfstudio.models.base_model import Model from nerfstudio.models.nerfacto import NerfactoModelConfig from nerfstudio.utils import colormaps import numpy as np import torch
14,794
# Copyright 2022 The nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Semantic NeRF-W implementation which should be fast enough to view in the viewer. """ from __future__ import annotations @dataclass class SemanticNerfWModelConfig(NerfactoModelConfig): """Nerfacto Model Config""" _target: Type = field(default_factory=lambda: SemanticNerfWModel) use_transient_embedding: bool = False """Whether to use transient embedding.""" semantic_loss_weight: float = 1.0 pass_semantic_gradients: bool = False class SemanticNerfWModel(Model): """Nerfacto model Args: config: Nerfacto configuration to instantiate model """ config: SemanticNerfWModelConfig def __init__(self, config: SemanticNerfWModelConfig, metadata: Dict, **kwargs) -> None: assert "semantics" in metadata.keys() and isinstance(metadata["semantics"], Semantics) self.semantics = metadata["semantics"] super().__init__(config=config, **kwargs) self.colormap = self.semantics.colors.clone().detach().to(self.device) def populate_modules(self): """Set the fields and modules.""" super().populate_modules() scene_contraction = SceneContraction(order=float("inf")) if self.config.use_transient_embedding: raise ValueError("Transient embedding is not fully working for semantic nerf-w.") # Fields self.field = TCNNNerfactoField( self.scene_box.aabb, num_levels=self.config.num_levels, max_res=self.config.max_res, log2_hashmap_size=self.config.log2_hashmap_size, spatial_distortion=scene_contraction, num_images=self.num_train_data, use_average_appearance_embedding=self.config.use_average_appearance_embedding, use_transient_embedding=self.config.use_transient_embedding, use_semantics=True, num_semantic_classes=len(self.semantics.classes), pass_semantic_gradients=self.config.pass_semantic_gradients, ) # Build the proposal network(s) self.proposal_networks = torch.nn.ModuleList() if self.config.use_same_proposal_network: network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for _ in range(self.config.num_proposal_iterations)] else: for _ in range(self.config.num_proposal_iterations): network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for network in self.proposal_networks] # Collider self.collider = NearFarCollider(near_plane=self.config.near_plane, far_plane=self.config.far_plane) # Samplers self.proposal_sampler = ProposalNetworkSampler( num_nerf_samples_per_ray=self.config.num_nerf_samples_per_ray, num_proposal_samples_per_ray=self.config.num_proposal_samples_per_ray, num_proposal_network_iterations=self.config.num_proposal_iterations, single_jitter=self.config.use_single_jitter, ) # renderers self.renderer_rgb = RGBRenderer(background_color=self.config.background_color) self.renderer_accumulation = AccumulationRenderer()
# Copyright 2022 The nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Semantic NeRF-W implementation which should be fast enough to view in the viewer. """ from __future__ import annotations @dataclass class SemanticNerfWModelConfig(NerfactoModelConfig): """Nerfacto Model Config""" _target: Type = field(default_factory=lambda: SemanticNerfWModel) use_transient_embedding: bool = False """Whether to use transient embedding.""" semantic_loss_weight: float = 1.0 pass_semantic_gradients: bool = False class SemanticNerfWModel(Model): """Nerfacto model Args: config: Nerfacto configuration to instantiate model """ config: SemanticNerfWModelConfig def __init__(self, config: SemanticNerfWModelConfig, metadata: Dict, **kwargs) -> None: assert "semantics" in metadata.keys() and isinstance(metadata["semantics"], Semantics) self.semantics = metadata["semantics"] super().__init__(config=config, **kwargs) self.colormap = self.semantics.colors.clone().detach().to(self.device) def populate_modules(self): """Set the fields and modules.""" super().populate_modules() scene_contraction = SceneContraction(order=float("inf")) if self.config.use_transient_embedding: raise ValueError("Transient embedding is not fully working for semantic nerf-w.") # Fields self.field = TCNNNerfactoField( self.scene_box.aabb, num_levels=self.config.num_levels, max_res=self.config.max_res, log2_hashmap_size=self.config.log2_hashmap_size, spatial_distortion=scene_contraction, num_images=self.num_train_data, use_average_appearance_embedding=self.config.use_average_appearance_embedding, use_transient_embedding=self.config.use_transient_embedding, use_semantics=True, num_semantic_classes=len(self.semantics.classes), pass_semantic_gradients=self.config.pass_semantic_gradients, ) # Build the proposal network(s) self.proposal_networks = torch.nn.ModuleList() if self.config.use_same_proposal_network: network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for _ in range(self.config.num_proposal_iterations)] else: for _ in range(self.config.num_proposal_iterations): network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for network in self.proposal_networks] # Collider self.collider = NearFarCollider(near_plane=self.config.near_plane, far_plane=self.config.far_plane) # Samplers self.proposal_sampler = ProposalNetworkSampler( num_nerf_samples_per_ray=self.config.num_nerf_samples_per_ray, num_proposal_samples_per_ray=self.config.num_proposal_samples_per_ray, num_proposal_network_iterations=self.config.num_proposal_iterations, single_jitter=self.config.use_single_jitter, ) # renderers self.renderer_rgb = RGBRenderer(background_color=self.config.background_color) self.renderer_accumulation = AccumulationRenderer()
self.renderer_depth = DepthRenderer()
12
2023-10-26 04:39:15+00:00
24k
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kw...
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
20,055
PROCESS_FUNC = {
    "QM9": ProcessedQM9,
    "DoubleQM9": ProcessedDoubleQM9,
    "TripleQM9": ProcessedTripleQM9,
    "TS1x": ProcessedTS1x,
}
FILE_TYPE = {
    "QM9": ".npz",
    "DoubleQM9": ".npz",
    "TripleQM9": ".npz",
    "TS1x": ".pkl",
}
LR_SCHEDULER = {
    "cos": CosineAnnealingWarmRestarts,
    "step": StepLR,
}

class DDPMModule(LightningModule):
    def __init__(
        self,
        model_config: Dict,
        optimizer_config: Dict,
        training_config: Dict,
        node_nfs: List[int] = [9] * 3,
        edge_nf: int = 4,
        condition_nf: int = 3,
        fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"],
        pos_dim: int = 3,
        update_pocket_coords: bool = True,
        condition_time: bool = True,
        edge_cutoff: Optional[float] = None,
        norm_values: Tuple = (1.0, 1.0, 1.0),
        norm_biases: Tuple = (0.0, 0.0, 0.0),
        noise_schedule: str = "polynomial_2",
        timesteps: int = 1000,
        precision: float = 1e-5,
        loss_type: str = "l2",
        pos_only: bool = False,
        process_type: Optional[str] = None,
        model: nn.Module = None,
        enforce_same_encoding: Optional[List] = None,
        scales: List[float] = [1.0, 1.0, 1.0],
        eval_epochs: int = 20,
        source: Optional[Dict] = None,
        fixed_idx: Optional[List] = None,
    ) -> None:
        super().__init__()
        egnn_dynamics = EGNNDynamics(
            model_config=model_config,
            node_nfs=node_nfs,
            edge_nf=edge_nf,
            condition_nf=condition_nf,
            fragment_names=fragment_names,
            pos_dim=pos_dim,
            update_pocket_coords=update_pocket_coords,
            condition_time=condition_time,
            edge_cutoff=edge_cutoff,
            model=model,
            enforce_same_encoding=enforce_same_encoding,
            source=source,
        )
        normalizer = Normalizer(
            norm_values=norm_values,
            norm_biases=norm_biases,
            pos_dim=pos_dim,
        )
        gamma_module = PredefinedNoiseSchedule(
            noise_schedule=noise_schedule,
            timesteps=timesteps,
            precision=precision,
        )
        schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
PROCESS_FUNC = {
    "QM9": ProcessedQM9,
    "DoubleQM9": ProcessedDoubleQM9,
    "TripleQM9": ProcessedTripleQM9,
    "TS1x": ProcessedTS1x,
}
FILE_TYPE = {
    "QM9": ".npz",
    "DoubleQM9": ".npz",
    "TripleQM9": ".npz",
    "TS1x": ".pkl",
}
LR_SCHEDULER = {
    "cos": CosineAnnealingWarmRestarts,
    "step": StepLR,
}

class DDPMModule(LightningModule):
    def __init__(
        self,
        model_config: Dict,
        optimizer_config: Dict,
        training_config: Dict,
        node_nfs: List[int] = [9] * 3,
        edge_nf: int = 4,
        condition_nf: int = 3,
        fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"],
        pos_dim: int = 3,
        update_pocket_coords: bool = True,
        condition_time: bool = True,
        edge_cutoff: Optional[float] = None,
        norm_values: Tuple = (1.0, 1.0, 1.0),
        norm_biases: Tuple = (0.0, 0.0, 0.0),
        noise_schedule: str = "polynomial_2",
        timesteps: int = 1000,
        precision: float = 1e-5,
        loss_type: str = "l2",
        pos_only: bool = False,
        process_type: Optional[str] = None,
        model: nn.Module = None,
        enforce_same_encoding: Optional[List] = None,
        scales: List[float] = [1.0, 1.0, 1.0],
        eval_epochs: int = 20,
        source: Optional[Dict] = None,
        fixed_idx: Optional[List] = None,
    ) -> None:
        super().__init__()
        egnn_dynamics = EGNNDynamics(
            model_config=model_config,
            node_nfs=node_nfs,
            edge_nf=edge_nf,
            condition_nf=condition_nf,
            fragment_names=fragment_names,
            pos_dim=pos_dim,
            update_pocket_coords=update_pocket_coords,
            condition_time=condition_time,
            edge_cutoff=edge_cutoff,
            model=model,
            enforce_same_encoding=enforce_same_encoding,
            source=source,
        )
        normalizer = Normalizer(
            norm_values=norm_values,
            norm_biases=norm_biases,
            pos_dim=pos_dim,
        )
        gamma_module = PredefinedNoiseSchedule(
            noise_schedule=noise_schedule,
            timesteps=timesteps,
            precision=precision,
        )
        schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
self.ddpm = EnVariationalDiffusion(
10
2023-10-30 02:53:38+00:00
24k
lewandofskee/DiAD
sgn/sgn.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn import torchvision from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock, Upsample from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig
21,254
def custom_sigmoid(x):
    # Logistic curve shifted to be centered at x = 600, with a slope scale of 10.
    return 1 / (1 + torch.exp(-(x - 600) / 10))

class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        with torch.no_grad():
def custom_sigmoid(x):
    # Logistic curve shifted to be centered at x = 600, with a slope scale of 10.
    return 1 / (1 + torch.exp(-(x - 600) / 10))

class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        with torch.no_grad():
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-10-30 14:21:09+00:00
24k
nv-tlabs/pacer
pacer/utils/motion_lib_smpl.py
[ { "identifier": "torch_utils", "path": "pacer/utils/torch_utils.py", "snippet": "def my_quat_rotate(q, v):\ndef quat_to_angle_axis(q):\ndef angle_axis_to_exp_map(angle, axis):\ndef quat_to_exp_map(q):\ndef quat_to_tan_norm(q):\ndef euler_xyz_to_exp_map(roll, pitch, yaw):\ndef exp_map_to_angle_axis(exp_m...
from ast import If from tqdm import tqdm from poselib.poselib.core.rotation3d import * from isaacgym.torch_utils import * from pacer.utils import torch_utils from poselib.poselib.skeleton.skeleton3d import SkeletonMotion, SkeletonState from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) import numpy as np import os import yaml import joblib import torch import torch.multiprocessing as mp import copy import gc
18,179
betas = curr_gender_betas[1:] height_tolorance = 0.0 vertices_curr, joints_curr = smpl_parsers[gender.item()].get_joints_verts(pose_aa, betas[None, ], trans) offset = joints_curr[:, 0] - trans diff_fix = ((vertices_curr - offset[:, None])[..., -1].min(dim=-1).values - height_tolorance).min() vertices_curr[..., 2].max() - vertices_curr[..., 2].min() trans[..., -1] -= diff_fix return trans def load_motion_with_skeleton(ids, motion_data_list, skeleton_trees, gender_betas, fix_height, smpl_parsers, masterfoot_config, queue, pid): # ZL: loading motion with the specified skeleton. Perfoming forward kinematics to get the joint positions res = {} for f in range(len(motion_data_list)): assert (len(ids) == len(motion_data_list)) curr_id = ids[f] # id for this datasample curr_file = motion_data_list[f] curr_gender_beta = gender_betas[f] trans = curr_file['root_trans_offset'].clone() pose_aa = torch.from_numpy(curr_file['pose_aa']) if fix_height: trans = fix_trans_height(pose_aa, trans, curr_gender_beta, smpl_parsers) pose_quat_global = curr_file['pose_quat_global'] B, J, N = pose_quat_global.shape if not masterfoot_config is None: num_bodies = len(masterfoot_config['body_names']) pose_quat_holder = np.zeros([B, num_bodies, N]) pose_quat_holder[..., -1] = 1 pose_quat_holder[...,masterfoot_config['body_to_orig_without_toe'], :] \ = pose_quat_global[..., masterfoot_config['orig_to_orig_without_toe'], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["L_Toe", "L_Toe_1", "L_Toe_1_1", "L_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["L_Ankle"]], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["R_Toe", "R_Toe_1", "R_Toe_1_1", "R_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["R_Ankle"]], :] pose_quat_global = pose_quat_holder sk_state = SkeletonState.from_rotation_and_root_translation( skeleton_trees[f], torch.from_numpy(pose_quat_global), trans, is_local=False) curr_motion = SkeletonMotion.from_skeleton_state(sk_state, curr_file.get("fps", 30)) curr_dof_vels = compute_motion_dof_vels(curr_motion) curr_motion.dof_vels = curr_dof_vels curr_motion.gender_beta = curr_gender_beta res[curr_id] = (curr_file, curr_motion) if not queue is None: queue.put(res) else: return res class DeviceCache: def __init__(self, obj, device): self.obj = obj self.device = device keys = dir(obj) num_added = 0 for k in keys: try: out = getattr(obj, k) except: # print("Error for key=", k) continue if isinstance(out, torch.Tensor): if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 elif isinstance(out, np.ndarray): out = torch.tensor(out) if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 # print("Total added", num_added) def __getattr__(self, string): out = getattr(self.obj, string) return out class MotionLib(): def __init__(self, motion_file, key_body_ids, device, fix_height = True, masterfoot_conifg = None, min_length = -1): self._key_body_ids = torch.tensor(key_body_ids, device=device) self._device = device self._motion_data = joblib.load(motion_file) if min_length != -1: data_list = {k: v for k, v in list(self._motion_data.items()) if len(v['pose_quat_global']) >= min_length} self._motion_data_list = np.array(list(data_list.values())) self._motion_data_keys = np.array(list(data_list.keys())) 
else: self._motion_data_list = np.array(list(self._motion_data.values())) self._motion_data_keys = np.array(list(self._motion_data.keys())) self._num_unique_motions = len(self._motion_data_list) self._masterfoot_conifg = masterfoot_conifg data_dir = "data/smpl"
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. USE_CACHE = True print("MOVING MOTION DATA TO GPU, USING CACHE:", USE_CACHE) if not USE_CACHE: old_numpy = torch.Tensor.numpy class Patch: def numpy(self): if self.is_cuda: return self.to("cpu").numpy() else: return old_numpy(self) torch.Tensor.numpy = Patch.numpy def local_rotation_to_dof_vel(local_rot0, local_rot1, dt): # Assume each joint is 3dof diff_quat_data = quat_mul_norm(quat_inverse(local_rot0), local_rot1) diff_angle, diff_axis = quat_angle_axis(diff_quat_data) dof_vel = diff_axis * diff_angle.unsqueeze(-1) / dt return dof_vel[1:, :].flatten() def compute_motion_dof_vels(motion): num_frames = motion.tensor.shape[0] dt = 1.0 / motion.fps dof_vels = [] for f in range(num_frames - 1): local_rot0 = motion.local_rotation[f] local_rot1 = motion.local_rotation[f + 1] frame_dof_vel = local_rotation_to_dof_vel(local_rot0, local_rot1, dt) dof_vels.append(frame_dof_vel) dof_vels.append(dof_vels[-1]) dof_vels = torch.stack(dof_vels, dim=0).view(num_frames, -1, 3) return dof_vels def fix_trans_height(pose_aa, trans, curr_gender_betas, smpl_parsers): with torch.no_grad(): gender = curr_gender_betas[0] betas = curr_gender_betas[1:] height_tolorance = 0.0 vertices_curr, joints_curr = smpl_parsers[gender.item()].get_joints_verts(pose_aa, betas[None, ], trans) offset = joints_curr[:, 0] - trans diff_fix = ((vertices_curr - offset[:, None])[..., -1].min(dim=-1).values - height_tolorance).min() vertices_curr[..., 2].max() - vertices_curr[..., 2].min() trans[..., -1] -= diff_fix return trans def load_motion_with_skeleton(ids, motion_data_list, skeleton_trees, gender_betas, fix_height, smpl_parsers, masterfoot_config, queue, pid): # ZL: loading motion with the specified skeleton. 
Perfoming forward kinematics to get the joint positions res = {} for f in range(len(motion_data_list)): assert (len(ids) == len(motion_data_list)) curr_id = ids[f] # id for this datasample curr_file = motion_data_list[f] curr_gender_beta = gender_betas[f] trans = curr_file['root_trans_offset'].clone() pose_aa = torch.from_numpy(curr_file['pose_aa']) if fix_height: trans = fix_trans_height(pose_aa, trans, curr_gender_beta, smpl_parsers) pose_quat_global = curr_file['pose_quat_global'] B, J, N = pose_quat_global.shape if not masterfoot_config is None: num_bodies = len(masterfoot_config['body_names']) pose_quat_holder = np.zeros([B, num_bodies, N]) pose_quat_holder[..., -1] = 1 pose_quat_holder[...,masterfoot_config['body_to_orig_without_toe'], :] \ = pose_quat_global[..., masterfoot_config['orig_to_orig_without_toe'], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["L_Toe", "L_Toe_1", "L_Toe_1_1", "L_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["L_Ankle"]], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["R_Toe", "R_Toe_1", "R_Toe_1_1", "R_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["R_Ankle"]], :] pose_quat_global = pose_quat_holder sk_state = SkeletonState.from_rotation_and_root_translation( skeleton_trees[f], torch.from_numpy(pose_quat_global), trans, is_local=False) curr_motion = SkeletonMotion.from_skeleton_state(sk_state, curr_file.get("fps", 30)) curr_dof_vels = compute_motion_dof_vels(curr_motion) curr_motion.dof_vels = curr_dof_vels curr_motion.gender_beta = curr_gender_beta res[curr_id] = (curr_file, curr_motion) if not queue is None: queue.put(res) else: return res class DeviceCache: def __init__(self, obj, device): self.obj = obj self.device = device keys = dir(obj) num_added = 0 for k in keys: try: out = getattr(obj, k) except: # print("Error for key=", k) continue if isinstance(out, torch.Tensor): if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 elif isinstance(out, np.ndarray): out = torch.tensor(out) if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 # print("Total added", num_added) def __getattr__(self, string): out = getattr(self.obj, string) return out class MotionLib(): def __init__(self, motion_file, key_body_ids, device, fix_height = True, masterfoot_conifg = None, min_length = -1): self._key_body_ids = torch.tensor(key_body_ids, device=device) self._device = device self._motion_data = joblib.load(motion_file) if min_length != -1: data_list = {k: v for k, v in list(self._motion_data.items()) if len(v['pose_quat_global']) >= min_length} self._motion_data_list = np.array(list(data_list.values())) self._motion_data_keys = np.array(list(data_list.keys())) else: self._motion_data_list = np.array(list(self._motion_data.values())) self._motion_data_keys = np.array(list(self._motion_data.keys())) self._num_unique_motions = len(self._motion_data_list) self._masterfoot_conifg = masterfoot_conifg data_dir = "data/smpl"
smpl_parser_n = SMPL_Parser(model_path=data_dir,
3
2023-10-31 20:47:12+00:00
24k
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n...
import numpy as np import torch import dexenv from gym.utils import seeding from isaacgym import gymapi from loguru import logger from tqdm import tqdm from dexenv.envs.dclaw_base import DClawBase from dexenv.utils.common import chunker_list from dexenv.utils.common import get_all_files_with_name from dexenv.utils.common import load_from_pickle from dexenv.utils.isaac_utils import load_a_goal_object_asset from dexenv.utils.isaac_utils import load_an_object_asset from dexenv.utils.isaac_utils import load_obj_texture
15,462
# add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = 
self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = []
class DclawMultiObjs(DClawBase): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.set_random_gen() self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset) self.num_objects = len(self.object_urdfs) logger.info(f'Object urdf root path:{self.dataset_path}.') logger.info(f'Number of available objects:{self.num_objects}.') super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) def set_random_gen(self, seed=12345): self.np_random, seed = seeding.np_random(seed) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) # load manipulated object and goal assets table_asset = self.get_table_asset() table_pose = self.get_table_pose() object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() # create fingertip force sensors, if needed if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) 
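# Note: gymapi.DOMAIN_SIM yields a sim-wide (not per-env) actor index; the lists built below keep those indices so downstream code can address each object's state globally.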
self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + 
self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = []
obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd)
5
2023-10-25 17:22:41+00:00
24k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/train/pipeline.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then p...
from collections.abc import Iterator from functools import partial from pathlib import Path from tempfile import gettempdir from typing import TYPE_CHECKING, final from deepspeed import DeepSpeedEngine from jaxtyping import Float, Int, Int64 from pydantic import NonNegativeInt, PositiveInt, validate_call from torch import Tensor from torch.nn.parallel import DataParallel from torch.optim.lr_scheduler import LRScheduler from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformer_lens import HookedTransformer from sparse_autoencoder.activation_resampler.activation_resampler import ( ActivationResampler, ParameterUpdateResults, ) from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore from sparse_autoencoder.autoencoder.model import SparseAutoencoder from sparse_autoencoder.loss.abstract_loss import AbstractLoss, LossReductionType from sparse_autoencoder.metrics.metrics_container import MetricsContainer, default_metrics from sparse_autoencoder.metrics.train.abstract_train_metric import TrainMetricData from sparse_autoencoder.metrics.validate.abstract_validate_metric import ValidationMetricData from sparse_autoencoder.optimizer.abstract_optimizer import AbstractOptimizerWithReset from sparse_autoencoder.source_data.abstract_dataset import SourceDataset, TorchTokenizedPrompts from sparse_autoencoder.source_model.replace_activations_hook import replace_activations_hook from sparse_autoencoder.source_model.store_activations_hook import store_activations_hook from sparse_autoencoder.source_model.zero_ablate_hook import zero_ablate_hook from sparse_autoencoder.tensor_types import Axis from sparse_autoencoder.train.utils.get_model_device import get_model_device from sparse_autoencoder.metrics.abstract_metric import MetricResult import torch import wandb
18,535
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stop the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use.""" metrics: MetricsContainer """Metrics to use."""
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stop the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use.""" metrics: MetricsContainer """Metrics to use."""
optimizer: AbstractOptimizerWithReset
9
2023-10-27 07:37:15+00:00
24k
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.paddi...
import os,gc import json import argparse import random import numpy as np import pandas as pd import wandb import torch import proteinnpt,baselines,utils from collections import defaultdict from proteinnpt.model import ProteinNPTModel from baselines.model import AugmentedPropertyPredictor from utils.esm.data import Alphabet from utils.tranception.model_pytorch import get_tranception_tokenizer from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr from utils.msa_utils import process_MSA from utils.model_utils import Trainer
18,292
def setup_config_and_paths(args): # All parameters that are not defined by the end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = len([x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]) if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} is {}".format(target,args.assay_location[target_index]))
else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} is {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
def setup_config_and_paths(args): # All parameters that are not defined by the end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = len([x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]) if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} is {}".format(target,args.assay_location[target_index]))
else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} is {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
7
2023-10-28 11:41:05+00:00
24k
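The next_line above scores each target by the Spearman correlation between predicted and measured values via pnpt_spearmanr. As a minimal sketch of what such a helper presumably does (an assumption, since its body is not shown in this record; the name spearman_ignoring_nans is hypothetical), a NaN-tolerant Spearman can be written as:

import numpy as np
from scipy.stats import spearmanr

def spearman_ignoring_nans(predictions, labels):
    # Assays are often partially labeled, so drop pairs with missing values first.
    predictions = np.asarray(predictions, dtype=float)
    labels = np.asarray(labels, dtype=float)
    keep = ~np.isnan(predictions) & ~np.isnan(labels)
    if keep.sum() < 2:
        return float("nan")  # rank correlation is undefined on fewer than two points
    rho, _pvalue = spearmanr(predictions[keep], labels[keep])
    return rho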
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: ...
import argparse import os import sys import cv2 import torch import torch.backends.cudnn as cudnn from pathlib import Path from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync from utils.rboxs_utils import poly2rbox, rbox2poly
14,903
if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
10
2023-10-31 06:06:41+00:00
24k
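The detect.py record above depends on rbox2poly to expand rotated boxes (cx, cy, longer side l, shorter side s, angle theta in [-pi/2, pi/2)) into 8-coordinate polygons before rescaling and drawing. A single-box sketch of that conversion, written as an illustration rather than the repo's batched implementation (the name rbox_to_poly is hypothetical):

import math

def rbox_to_poly(cx, cy, l, s, theta):
    # Unit vectors along the box's long axis and the perpendicular short axis.
    ux, uy = math.cos(theta), math.sin(theta)
    vx, vy = -uy, ux
    dx1, dy1 = ux * l / 2, uy * l / 2
    dx2, dy2 = vx * s / 2, vy * s / 2
    # Four corners traversed in order, as (x, y) pairs.
    return [
        (cx - dx1 - dx2, cy - dy1 - dy2),
        (cx + dx1 - dx2, cy + dy1 - dy2),
        (cx + dx1 + dx2, cy + dy1 + dy2),
        (cx - dx1 + dx2, cy - dy1 + dy2),
    ]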
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @a...
from typing import Union from lightphe.models.Homomorphic import Homomorphic from lightphe.models.Algorithm import Algorithm from lightphe.cryptosystems.RSA import RSA from lightphe.cryptosystems.ElGamal import ElGamal from lightphe.cryptosystems.Paillier import Paillier from lightphe.cryptosystems.DamgardJurik import DamgardJurik from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama from lightphe.cryptosystems.Benaloh import Benaloh from lightphe.cryptosystems.NaccacheStern import NaccacheStern from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal from lightphe.commons import phe_utils from lightphe.commons.logger import Logger
17,545
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys)
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys)
elif algorithm_name == Algorithm.OkamotoUchiyama:
6
2023-10-28 14:57:59+00:00
24k
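The Ciphertext record above selects a cryptosystem with a long if/elif chain over Algorithm constants. A sketch of an equivalent dispatch-table variant, assuming only the classes and constants already imported in the record (the helper build_cryptosystem is hypothetical):

from lightphe.models.Algorithm import Algorithm
from lightphe.cryptosystems.RSA import RSA
from lightphe.cryptosystems.ElGamal import ElGamal
from lightphe.cryptosystems.Paillier import Paillier
from lightphe.cryptosystems.DamgardJurik import DamgardJurik
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama
from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal

def build_cryptosystem(algorithm_name, keys):
    # One factory per supported algorithm; lambdas defer construction until lookup.
    factories = {
        Algorithm.RSA: lambda: RSA(keys=keys),
        Algorithm.ElGamal: lambda: ElGamal(keys=keys),
        Algorithm.ExponentialElGamal: lambda: ElGamal(keys=keys, exponential=True),
        Algorithm.EllipticCurveElGamal: lambda: EllipticCurveElGamal(keys=keys),
        Algorithm.Paillier: lambda: Paillier(keys=keys),
        Algorithm.DamgardJurik: lambda: DamgardJurik(keys=keys),
        Algorithm.OkamotoUchiyama: lambda: OkamotoUchiyama(keys=keys),
    }
    if algorithm_name not in factories:
        raise ValueError(f"Unsupported algorithm: {algorithm_name}")
    return factories[algorithm_name]()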
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/mapping.py
[ { "identifier": "PeftModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModel(PushToHubMixin, torch.nn.Module):\n \"\"\"\n Base model encompassing various Peft methods.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The base transfo...
from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import AdaLoraConfig, LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig from .utils import PromptLearningConfig
14,908
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance(
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance(
peft_config, PromptLearningConfig
10
2023-10-30 10:50:32+00:00
24k
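_prepare_prompt_learning_config above repeats one pattern three times: scan candidate config keys in priority order and take the first that exists. A small generic helper capturing that pattern (a sketch only; first_present is not part of the peft API):

def first_present(model_config: dict, candidate_keys, setting_name: str):
    # Return the value of the first candidate key found in the model config.
    for key in candidate_keys:
        if key in model_config:
            return model_config[key]
    raise ValueError(f"Please specify `{setting_name}` in `peft_config`")

# Usage mirroring the original chains:
# num_layers = first_present(model_config, ("num_hidden_layers", "num_layers", "n_layer"), "num_layers")
# token_dim = first_present(model_config, ("hidden_size", "n_embd", "d_model"), "token_dim")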
chenran-li/RQL-release
sb3_contrib/ars/ars.py
[ { "identifier": "BaseAlgorithm", "path": "stable_baselines3/common/base_class.py", "snippet": "class BaseAlgorithm(ABC):\n \"\"\"\n The base of RL algorithms\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (i...
import copy import sys import time import warnings import numpy as np import torch as th import torch.nn.utils from functools import partial from typing import Any, Dict, Optional, Type, TypeVar, Union from gym import spaces from stable_baselines3.common.base_class import BaseAlgorithm from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_zip_file from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_schedule_fn, safe_mean from sb3_contrib.ars.policies import ARSPolicy, LinearPolicy, MlpPolicy from sb3_contrib.common.vec_env.async_eval import AsyncEval
17,039
:param episode_lengths: List containing per-episode lengths (in number of steps) """ # Mimic Monitor Wrapper infos = [ {"episode": {"r": episode_reward, "l": episode_length}} for episode_reward, episode_length in zip(episode_rewards, episode_lengths) ] self._update_info_buffer(infos) def _trigger_callback( self, _locals: Dict[str, Any], _globals: Dict[str, Any], callback: BaseCallback, n_envs: int, ) -> None: """ Callback passed to the ``evaluate_policy()`` helper in order to increment the number of timesteps and trigger events in the single process version. :param _locals: :param _globals: :param callback: Callback that will be called at every step :param n_envs: Number of environments """ self.num_timesteps += n_envs callback.on_step() def evaluate_candidates( self, candidate_weights: th.Tensor, callback: BaseCallback, async_eval: Optional[AsyncEval] ) -> th.Tensor: """ Evaluate each candidate. :param candidate_weights: The candidate weights to be evaluated. :param callback: Callback that will be called at each step (or after evaluation in the multiprocess version) :param async_eval: The object for asynchronous evaluation of candidates. :return: The episodic return for each candidate. """ batch_steps = 0 # returns == sum of rewards candidate_returns = th.zeros(self.pop_size, device=self.device) train_policy = copy.deepcopy(self.policy) # Empty buffer to show only mean over one iteration (one set of candidates) in the logs self.ep_info_buffer = [] callback.on_rollout_start() if async_eval is not None: # Multiprocess asynchronous version async_eval.send_jobs(candidate_weights, self.pop_size) results = async_eval.get_results() for weights_idx, (episode_rewards, episode_lengths) in results: # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += np.sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # Combine the filter stats of each process for normalization for worker_obs_rms in async_eval.get_obs_rms(): if self._vec_normalize_env is not None: # worker_obs_rms.count -= self.old_count self._vec_normalize_env.obs_rms.combine(worker_obs_rms) # Hack: don't count timesteps twice (between the two are synced) # otherwise it will lead to overflow, # in practice we would need two RunningMeanStats self._vec_normalize_env.obs_rms.count -= self.old_count # Synchronise VecNormalize if needed if self._vec_normalize_env is not None: async_eval.sync_obs_rms(self._vec_normalize_env.obs_rms.copy()) self.old_count = self._vec_normalize_env.obs_rms.count # Hack to have Callback events for _ in range(batch_steps // len(async_eval.remotes)): self.num_timesteps += len(async_eval.remotes) callback.on_step() else: # Single process, synchronous version for weights_idx in range(self.pop_size): # Load current candidate weights train_policy.load_from_vector(candidate_weights[weights_idx].cpu()) # Evaluate the candidate episode_rewards, episode_lengths = evaluate_policy( train_policy, self.env, n_eval_episodes=self.n_eval_episodes, return_episode_rewards=True, # Increment num_timesteps too (slight mismatch with multi envs) callback=partial(self._trigger_callback, callback=callback, n_envs=self.env.num_envs), warn=False, ) # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # 
Note: we increment the num_timesteps inside the evaluate_policy() # however when using multiple environments, there will be a slight # mismatch between the number of timesteps used and the number # of calls to the step() method (cf. implementation of evaluate_policy()) # self.num_timesteps += batch_steps callback.on_rollout_end() return candidate_returns def _log_and_dump(self) -> None: """ Dump information to the logger. """ time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon) fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed) if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
SelfARS = TypeVar("SelfARS", bound="ARS") class ARS(BaseAlgorithm): """ Augmented Random Search: https://arxiv.org/abs/1803.07055 Original implementation: https://github.com/modestyachts/ARS C++/Cuda Implementation: https://github.com/google-research/tiny-differentiable-simulator/ 150 LOC Numpy Implementation: https://github.com/alexis-jacq/numpy_ARS/blob/master/asr.py :param policy: The policy to train, can be an instance of ``ARSPolicy``, or a string from ["LinearPolicy", "MlpPolicy"] :param env: The environment to train on, may be a string if registered with gym :param n_delta: How many random perturbations of the policy to try at each update step. :param n_top: How many of the top delta to use in each update step. Default is n_delta :param learning_rate: Float or schedule for the step size :param delta_std: Float or schedule for the exploration noise :param zero_policy: Boolean determining if the passed policy should have it's weights zeroed before training. :param alive_bonus_offset: Constant added to the reward at each step, used to cancel out alive bonuses. :param n_eval_episodes: Number of episodes to evaluate each candidate. :param policy_kwargs: Keyword arguments to pass to the policy on creation :param tensorboard_log: String with the directory to put tensorboard logs: :param seed: Random seed for the training :param verbose: Verbosity level: 0 no output, 1 info, 2 debug :param device: Torch device to use for training, defaults to "cpu" :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "LinearPolicy": LinearPolicy, } def __init__( self, policy: Union[str, Type[ARSPolicy]], env: Union[GymEnv, str], n_delta: int = 8, n_top: Optional[int] = None, learning_rate: Union[float, Schedule] = 0.02, delta_std: Union[float, Schedule] = 0.05, zero_policy: bool = True, alive_bonus_offset: float = 0, n_eval_episodes: int = 1, policy_kwargs: Optional[Dict[str, Any]] = None, tensorboard_log: Optional[str] = None, seed: Optional[int] = None, verbose: int = 0, device: Union[th.device, str] = "cpu", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate=learning_rate, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, device=device, supported_action_spaces=(spaces.Box, spaces.Discrete), support_multi_env=True, seed=seed, ) self.n_delta = n_delta self.pop_size = 2 * n_delta self.delta_std_schedule = get_schedule_fn(delta_std) self.n_eval_episodes = n_eval_episodes if n_top is None: n_top = n_delta # Make sure our hyper parameters are valid and auto correct them if they are not if n_top > n_delta: warnings.warn(f"n_top = {n_top} > n_delta = {n_top}, setting n_top = n_delta") n_top = n_delta self.n_top = n_top self.alive_bonus_offset = alive_bonus_offset self.zero_policy = zero_policy self.weights = None # Need to call init model to initialize weight self.processes = None # Keep track of how many steps where elapsed before a new rollout # Important for syncing observation normalization between workers self.old_count = 0 if _init_setup_model: self._setup_model() def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) self.policy = self.policy_class(self.observation_space, self.action_space, **self.policy_kwargs) self.policy = self.policy.to(self.device) self.weights = th.nn.utils.parameters_to_vector(self.policy.parameters()).detach() self.n_params = len(self.weights) if self.zero_policy: 
self.weights = th.zeros_like(self.weights, requires_grad=False) self.policy.load_from_vector(self.weights.cpu()) def _mimic_monitor_wrapper(self, episode_rewards: np.ndarray, episode_lengths: np.ndarray) -> None: """ Helper to mimic Monitor wrapper and report episode statistics (mean reward, mean episode length). :param episode_rewards: List containing per-episode rewards :param episode_lengths: List containing per-episode lengths (in number of steps) """ # Mimic Monitor Wrapper infos = [ {"episode": {"r": episode_reward, "l": episode_length}} for episode_reward, episode_length in zip(episode_rewards, episode_lengths) ] self._update_info_buffer(infos) def _trigger_callback( self, _locals: Dict[str, Any], _globals: Dict[str, Any], callback: BaseCallback, n_envs: int, ) -> None: """ Callback passed to the ``evaluate_policy()`` helper in order to increment the number of timesteps and trigger events in the single process version. :param _locals: :param _globals: :param callback: Callback that will be called at every step :param n_envs: Number of environments """ self.num_timesteps += n_envs callback.on_step() def evaluate_candidates( self, candidate_weights: th.Tensor, callback: BaseCallback, async_eval: Optional[AsyncEval] ) -> th.Tensor: """ Evaluate each candidate. :param candidate_weights: The candidate weights to be evaluated. :param callback: Callback that will be called at each step (or after evaluation in the multiprocess version) :param async_eval: The object for asynchronous evaluation of candidates. :return: The episodic return for each candidate. """ batch_steps = 0 # returns == sum of rewards candidate_returns = th.zeros(self.pop_size, device=self.device) train_policy = copy.deepcopy(self.policy) # Empty buffer to show only mean over one iteration (one set of candidates) in the logs self.ep_info_buffer = [] callback.on_rollout_start() if async_eval is not None: # Multiprocess asynchronous version async_eval.send_jobs(candidate_weights, self.pop_size) results = async_eval.get_results() for weights_idx, (episode_rewards, episode_lengths) in results: # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += np.sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # Combine the filter stats of each process for normalization for worker_obs_rms in async_eval.get_obs_rms(): if self._vec_normalize_env is not None: # worker_obs_rms.count -= self.old_count self._vec_normalize_env.obs_rms.combine(worker_obs_rms) # Hack: don't count timesteps twice (between the two are synced) # otherwise it will lead to overflow, # in practice we would need two RunningMeanStats self._vec_normalize_env.obs_rms.count -= self.old_count # Synchronise VecNormalize if needed if self._vec_normalize_env is not None: async_eval.sync_obs_rms(self._vec_normalize_env.obs_rms.copy()) self.old_count = self._vec_normalize_env.obs_rms.count # Hack to have Callback events for _ in range(batch_steps // len(async_eval.remotes)): self.num_timesteps += len(async_eval.remotes) callback.on_step() else: # Single process, synchronous version for weights_idx in range(self.pop_size): # Load current candidate weights train_policy.load_from_vector(candidate_weights[weights_idx].cpu()) # Evaluate the candidate episode_rewards, episode_lengths = evaluate_policy( train_policy, self.env, n_eval_episodes=self.n_eval_episodes, return_episode_rewards=True, # Increment num_timesteps too (slight mismatch 
with multi envs) callback=partial(self._trigger_callback, callback=callback, n_envs=self.env.num_envs), warn=False, ) # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # Note: we increment the num_timesteps inside the evaluate_policy() # however when using multiple environments, there will be a slight # mismatch between the number of timesteps used and the number # of calls to the step() method (cf. implementation of evaluate_policy()) # self.num_timesteps += batch_steps callback.on_rollout_end() return candidate_returns def _log_and_dump(self) -> None: """ Dump information to the logger. """ time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon) fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed) if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
7
2023-10-28 01:09:21+00:00
24k
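evaluate_candidates above collects one return per perturbed candidate (2 * n_delta of them); the update step then keeps only the n_top best directions. A toy NumPy sketch of the update rule from the ARS paper linked in the docstring, assuming the +delta and -delta returns have already been gathered (names are illustrative, not the repo's):

import numpy as np

def ars_update(theta, deltas, plus_returns, minus_returns, n_top, step_size):
    plus_returns = np.asarray(plus_returns, dtype=float)
    minus_returns = np.asarray(minus_returns, dtype=float)
    # Rank each direction by the better of its two returns and keep the top n_top.
    top = np.argsort(np.maximum(plus_returns, minus_returns))[-n_top:]
    # Normalize the step by the std of the returns actually used in the update.
    sigma = np.concatenate([plus_returns[top], minus_returns[top]]).std() + 1e-8
    step = np.zeros_like(theta)
    for i in top:
        step += (plus_returns[i] - minus_returns[i]) * deltas[i]
    return theta + step_size / (n_top * sigma) * step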
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ...
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
14,917
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE,
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE,
'ft-transformer': FTTransformer, 'saint': None,
1
2023-10-30 14:55:44+00:00
24k
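MODEL_CARDS above doubles as a feature matrix: a None value marks a backend that is registered but not yet implemented. A minimal sketch of resolving a name against such a registry (the helper get_model_class is hypothetical):

def get_model_class(model_name: str, model_cards: dict):
    # Distinguish "unknown name" from "known but unimplemented".
    if model_name not in model_cards:
        raise KeyError(f"unknown model '{model_name}'; choose from {sorted(model_cards)}")
    model_class = model_cards[model_name]
    if model_class is None:
        raise NotImplementedError(f"'{model_name}' is registered but not implemented yet")
    return model_class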
hyperspy/exspy
exspy/tests/signals/test_kramers_kronig_transform.py
[ { "identifier": "VolumePlasmonDrude", "path": "exspy/components/volume_plasmon_drude.py", "snippet": "class VolumePlasmonDrude(hs.model.components1D.Expression):\n r\"\"\"\n Drude volume plasmon energy loss function component, the energy loss\n function is defined as:\n\n .. math::\n\n ...
import numpy as np import pytest import hyperspy.api as hs from hyperspy.components1d import Lorentzian from exspy.components import VolumePlasmonDrude from exspy.misc.eels.tools import eels_constant from exspy.signals import EELSSpectrum
20,729
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. class Test2D: def setup_method(self, method): """To test the kramers_kronig_analysis we will generate 3 EELSSpectrum instances. First, a model energy loss function (ELF), in our case following the Drude bulk plasmon peak. Second, we simulate the inelastic scattering to generate a model scattering distribution (SPC). Finally, we use a Lorentzian peak with integral equal to 1 to simulate a ZLP. """ # Parameters i0 = 1.0 t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3))) t = t.transpose(signal_axes=0) scale = 0.02 # Create a 3x2x2048 spectrum with Drude plasmon
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. class Test2D: def setup_method(self, method): """To test the kramers_kronig_analysis we will generate 3 EELSSpectrum instances. First, a model energy loss function (ELF), in our case following the Drude bulk plasmon peak. Second, we simulate the inelastic scattering to generate a model scattering distribution (SPC). Finally, we use a Lorentzian peak with integral equal to 1 to simulate a ZLP. """ # Parameters i0 = 1.0 t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3))) t = t.transpose(signal_axes=0) scale = 0.02 # Create a 3x2x2048 spectrum with Drude plasmon
s = EELSSpectrum(np.zeros((2, 3, 2 * 2048)))
2
2023-10-28 20:04:10+00:00
24k
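The setup above builds its model energy loss function from a Drude bulk plasmon. As a reference sketch (the standard free-electron expression, stated here as an assumption rather than code from the record), the Drude ELF Im(-1/eps) for plasmon energy E_p and damping dE can be computed as:

import numpy as np

def drude_elf(energy, plasmon_energy, plasmon_width):
    # Im(-1/eps) = E * dE * E_p**2 / ((E**2 - E_p**2)**2 + (E * dE)**2)
    e = np.asarray(energy, dtype=float)
    ep, de = plasmon_energy, plasmon_width
    return e * de * ep**2 / ((e**2 - ep**2) ** 2 + (e * de) ** 2)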
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n ...
import argparse import os import logging import yaml import numpy as np import random import time import datetime import json import math import torch import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn import torch.distributed as dist import socket from pathlib import Path from functools import partial from sklearn.metrics import roc_auc_score from collections import OrderedDict from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from transformers import AutoModel,BertConfig,AutoTokenizer from factory import utils from scheduler import create_scheduler from optim import create_optimizer from engine.train import train,valid_on_cheXpert,valid_on_chestxray14 from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2 from models.tokenization_bert import BertTokenizer from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset from io import BytesIO
16,347
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True:
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True:
train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)
14
2023-10-30 00:24:16+00:00
24k
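main() above pins every RNG with a rank-offset seed so distributed workers are reproducible yet decorrelated. The same pattern as a standalone helper (a sketch; fix_seed is not a function in the repo):

import random
import numpy as np
import torch

def fix_seed(base_seed: int, rank: int = 0) -> None:
    # Offset the base seed by the process rank so each worker differs deterministically.
    seed = base_seed + rank
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)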
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strin...
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
14,988
def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs.
The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach handlers that determine which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port or external IP has changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly.
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature.
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem
filepath = save_data_to_filesystem(
13
2023-10-26 18:54:47+00:00
24k
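A side note on the miner code in the record above: the hourly request tracker relies on a threading.Timer that re-arms itself from its own callback. A minimal, self-contained sketch of that pattern (class and method names below are mine, not the repo's):

import threading

class RequestCounter:
    """Counts requests and logs/resets the count once per interval."""

    def __init__(self, interval_s: float = 3600.0):
        self.count = 0
        self.interval_s = interval_s
        self._arm_timer()

    def _arm_timer(self):
        # threading.Timer fires only once, so the callback re-arms it each interval.
        self._timer = threading.Timer(self.interval_s, self._reset)
        self._timer.daemon = True  # do not keep the process alive on shutdown
        self._timer.start()

    def _reset(self):
        print(f"requests in the last interval: {self.count}")
        self.count = 0
        self._arm_timer()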
cpacker/MemGPT
memgpt/main.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "CLIInterface", "path": "memgpt/interface.py", "snippet": "class CLIInterface(AgentInterface):\r\n \"\"\"Basic interface for dumping agent events to the command-line\"\"\"\r\n\r\n @staticmethod\r\n ...
import shutil import configparser import uuid import logging import glob import os import sys import pickle import traceback import json import questionary import typer import memgpt.agent as agent import memgpt.system as system import memgpt.constants as constants import memgpt.errors as errors from rich.console import Console from prettytable import PrettyTable from memgpt.log import logger from memgpt.interface import CLIInterface as interface # for printing to terminal from memgpt.config import MemGPTConfig from memgpt.cli.cli import run, attach, version, server, open_folder, quickstart, migrate from memgpt.cli.cli_config import configure, list, add, delete from memgpt.cli.cli_load import app as load_app from memgpt.agent_store.storage import StorageConnector, TableType from memgpt.metadata import MetadataStore, save_agent
17,393
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version)
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version)
app.command(name="attach")(attach)
4
2023-10-11 07:38:37+00:00
24k
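The MemGPT entry point above registers plain functions on a Typer app via app.command(name=...)(fn) rather than with decorators, which lets commands be imported from other modules and attached in one place. A minimal runnable sketch of that pattern (the run stub below is a hypothetical stand-in, not MemGPT's actual command):

import typer

app = typer.Typer(pretty_exceptions_enable=False)

def run(agent: str = "default"):
    """Hypothetical stand-in for memgpt.cli.cli.run."""
    typer.echo(f"running agent: {agent}")

# Equivalent to decorating run with @app.command(name="run")
app.command(name="run")(run)

if __name__ == "__main__":
    app()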
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_s...
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from diffusers.models import AutoencoderKL from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from copy import deepcopy from tqdm import tqdm from diffusion import IDDPM from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.lcm_scheduler import LCMScheduler from torchvision.utils import save_image from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,410
args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=True, resume_lr_scheduler=True) if args.debug: config.log_interval = 1 config.train_batch_size = 11 config.valid_num = 100 config.load_from = None os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches = False accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True) model = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") if config.load_from is not None: if args.load_from is not None: config.load_from = args.load_from missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False)) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') model_ema = deepcopy(model).eval() model_teacher = deepcopy(model).eval() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if
config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=config.valid_num) # used for balanced sampling # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def ema_update(model_dest: nn.Module, model_src: nn.Module, rate): param_dict_src = dict(model_src.named_parameters()) for p_name, p_dest in model_dest.named_parameters(): p_src = param_dict_src[p_name] assert p_src is not p_dest p_dest.data.mul_(rate).add_((1 - rate) * p_src.data) def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def log_validation(model, step, device): if hasattr(model, 'module'): model = model.module scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon") scheduler.set_timesteps(4, 50) infer_timesteps = scheduler.timesteps dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu') caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device) hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1) ar = torch.tensor([[1.]], device=device).repeat(1, 1) # Create sampling noise: infer_latents = torch.randn(1, 4, 1024, 1024, device=device) model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks) logger.info("Running validation... ") # 7. 
LCM MultiStep Sampling Loop: for i, t in tqdm(list(enumerate(infer_timesteps))): ts = torch.full((1,), t, device=device, dtype=torch.long) # model prediction (v-prediction, eps, x) model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4] # compute the previous noisy sample x_t -> x_t-1 infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False) samples = vae.decode(denoised / 0.18215).sample torch.cuda.empty_cache() save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1)) def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = z * config.scale_factor y = batch[1] y_mask = batch[2] data_info = batch[3] # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch 
[{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=True, resume_lr_scheduler=True) if args.debug: config.log_interval = 1 config.train_batch_size = 11 config.valid_num = 100 config.load_from = None os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches = False accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger =
get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True) model = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") if config.load_from is not None: if args.load_from is not None: config.load_from = args.load_from missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False)) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') model_ema = deepcopy(model).eval() model_teacher = deepcopy(model).eval() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=config.valid_num) # used for balanced sampling # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
lr_scale_ratio = auto_scale_lr(config.train_batch_size * get_world_size() * config.gradient_accumulation_steps,
16
2023-10-12 14:16:33+00:00
24k
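One detail worth isolating from the LCM training script above: the boundary-condition scalings guarantee that the consistency function reduces to the identity at t = 0 (c_skip -> 1, c_out -> 0). A small self-contained check, written with the scaled timestep s = timestep_scaling * timestep, which is equivalent to the script's timestep / 0.1 at the default scaling of 10:

import torch

def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    s = timestep_scaling * timestep
    c_skip = sigma_data**2 / (s**2 + sigma_data**2)
    c_out = s / (s**2 + sigma_data**2) ** 0.5
    return c_skip, c_out

t = torch.tensor([0.0, 250.0, 999.0])
c_skip, c_out = scalings_for_boundary_conditions(t)
# At t = 0: c_skip == 1 and c_out == 0, so model_pred equals the noisy input
# itself -- the consistency-model boundary condition f(x, 0) = x.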
NVlabs/EmerNeRF
datasets/waymo.py
[ { "identifier": "SceneLidarSource", "path": "datasets/base/lidar_source.py", "snippet": "class SceneLidarSource(abc.ABC):\n \"\"\"\n The base class for the lidar source of a scene.\n \"\"\"\n\n data_cfg: OmegaConf = None\n # the normalized timestamps of all points (normalized to [0, 1]), ...
import logging import os import numpy as np import torch from typing import Dict from omegaconf import OmegaConf from torch import Tensor from tqdm import trange from datasets.base.lidar_source import SceneLidarSource from datasets.base.pixel_source import ScenePixelSource from datasets.base.scene_dataset import SceneDataset from datasets.base.split_wrapper import SplitWrapper from datasets.utils import voxel_coords_to_world_coords from radiance_fields.video_utils import depth_visualizer, save_videos, scene_flow_to_rgb
17,561
render_results=video_dict, save_pth=kwargs["save_pth"], num_timestamps=kwargs["num_timestamps"], keys=kwargs["keys"], num_cams=kwargs["num_cams"], fps=kwargs["fps"], verbose=kwargs["verbose"], save_seperate_video=kwargs["save_seperate_video"], ) def render_data_videos( self, save_pth: str, split: str = "full", fps: int = 24, verbose=True, ): """ Render a video of data. """ pixel_dataset, lidar_dataset = None, None if split == "full": if self.pixel_source is not None: pixel_dataset = self.full_pixel_set if self.lidar_source is not None: lidar_dataset = self.full_lidar_set elif split == "train": if self.pixel_source is not None: pixel_dataset = self.train_pixel_set if self.lidar_source is not None: lidar_dataset = self.train_lidar_set elif split == "test": if self.pixel_source is not None: pixel_dataset = self.test_pixel_set if self.lidar_source is not None: lidar_dataset = self.test_lidar_set else: raise NotImplementedError(f"Split {split} not supported") # pixel source rgb_imgs, dynamic_objects = [], [] sky_masks, feature_pca_colors = [], [] lidar_depths, flow_colors = [], [] for i in trange( len(pixel_dataset), desc="Rendering data videos", dynamic_ncols=True ): data_dict = pixel_dataset[i] if "pixels" in data_dict: rgb_imgs.append(data_dict["pixels"].cpu().numpy()) if "dynamic_masks" in data_dict: dynamic_objects.append( (data_dict["dynamic_masks"].unsqueeze(-1) * data_dict["pixels"]) .cpu() .numpy() ) if "sky_masks" in data_dict: sky_masks.append(data_dict["sky_masks"].cpu().numpy()) if "features" in data_dict: features = data_dict["features"] # use registered parameters to normalize the features for visualization; # clamp the normalized result (not the denominator) into [0, 1] features = features @ self.pixel_source.feat_dimension_reduction_mat features = ( (features - self.pixel_source.feat_color_min) / ( self.pixel_source.feat_color_max - self.pixel_source.feat_color_min ) ).clamp(0, 1) feature_pca_colors.append(features.cpu().numpy()) if lidar_dataset is not None: # to deal with unsynchronized data # find the closest lidar scan to the current image in time closest_lidar_idx = self.lidar_source.find_closest_timestep( data_dict["normed_timestamps"].flatten()[0] ) data_dict = lidar_dataset[closest_lidar_idx] lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) # project lidar points to the image plane # TODO: consider making this a function intrinsic_4x4 = torch.nn.functional.pad( self.pixel_source.intrinsics[i], (0, 1, 0, 1) ) intrinsic_4x4[3, 3] = 1.0 lidar2img = intrinsic_4x4 @ self.pixel_source.cam_to_worlds[i].inverse() lidar_points = ( lidar2img[:3, :3] @ lidar_points.T + lidar2img[:3, 3:4] ).T depth = lidar_points[:, 2] cam_points = lidar_points[:, :2] / (depth.unsqueeze(-1) + 1e-6) valid_mask = ( (cam_points[:, 0] >= 0) & (cam_points[:, 0] < self.pixel_source.WIDTH) & (cam_points[:, 1] >= 0) & (cam_points[:, 1] < self.pixel_source.HEIGHT) & (depth > 0) ) depth = depth[valid_mask] _cam_points = cam_points[valid_mask] depth_map = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH ).to(self.device) depth_map[ _cam_points[:, 1].long(), _cam_points[:, 0].long() ] = depth.squeeze(-1) depth_img = depth_map.cpu().numpy() depth_img = depth_visualizer(depth_img, depth_img > 0) mask = (depth_map.unsqueeze(-1) > 0).cpu().numpy() # show the depth map on top of the rgb image image = rgb_imgs[-1] * (1 - mask) + depth_img * mask lidar_depths.append(image) # project lidar flows to the image plane flow_img = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH, 3 ).to(self.device) # to
examine whether the ground labels are correct valid_mask = valid_mask & (~data_dict["lidar_ground"]) _cam_points = cam_points[valid_mask] # final color: # white if no flow, black if ground, and flow color otherwise
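# --- Illustrative sketch (not part of the original file): the lidar-to-image
# projection used above. The 3x3 intrinsics are zero-padded to 4x4 (with
# [3, 3] = 1) so they compose with the 4x4 world-to-camera transform; the
# perspective divide then yields pixel coordinates and per-point depth.
import torch
import torch.nn.functional as F

def project_lidar_to_image(points_world, intrinsics_3x3, cam_to_world):
    intrinsic_4x4 = F.pad(intrinsics_3x3, (0, 1, 0, 1))  # pad right and bottom with zeros
    intrinsic_4x4[3, 3] = 1.0
    lidar2img = intrinsic_4x4 @ torch.linalg.inv(cam_to_world)
    pts = (lidar2img[:3, :3] @ points_world.T + lidar2img[:3, 3:4]).T
    depth = pts[:, 2]
    uv = pts[:, :2] / (depth.unsqueeze(-1) + 1e-6)  # perspective divide
    return uv, depth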
logger = logging.getLogger() class WaymoPixelSource(ScenePixelSource): ORIGINAL_SIZE = [[1280, 1920], [1280, 1920], [1280, 1920], [884, 1920], [884, 1920]] OPENCV2DATASET = np.array( [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]] ) def __init__( self, pixel_data_config: OmegaConf, data_path: str, start_timestep: int, end_timestep: int, device: torch.device = torch.device("cpu"), ): super().__init__(pixel_data_config, device=device) self.data_path = data_path self.start_timestep = start_timestep self.end_timestep = end_timestep self.create_all_filelist() self.load_data() def create_all_filelist(self): """ Create file lists for all data files. e.g., img files, feature files, etc. """ # ---- define camera list ---- # # 0: front, 1: front_left, 2: front_right, 3: side_left, 4: side_right if self.num_cams == 1: self.camera_list = [0] elif self.num_cams == 3: self.camera_list = [1, 0, 2] elif self.num_cams == 5: self.camera_list = [3, 1, 0, 2, 4] else: raise NotImplementedError( f"num_cams: {self.num_cams} not supported for waymo dataset" ) # ---- define filepaths ---- # img_filepaths, feat_filepaths = [], [] dynamic_mask_filepaths, sky_mask_filepaths = [], [] # Note: we assume all the files in waymo dataset are synchronized for t in range(self.start_timestep, self.end_timestep): for cam_idx in self.camera_list: img_filepaths.append( os.path.join(self.data_path, "images", f"{t:03d}_{cam_idx}.jpg") ) dynamic_mask_filepaths.append( os.path.join( self.data_path, "dynamic_masks", f"{t:03d}_{cam_idx}.png" ) ) sky_mask_filepaths.append( os.path.join(self.data_path, "sky_masks", f"{t:03d}_{cam_idx}.png") ) feat_filepaths.append( os.path.join( self.data_path, self.data_cfg.feature_model_type, f"{t:03d}_{cam_idx}.npy", ) ) self.img_filepaths = np.array(img_filepaths) self.dynamic_mask_filepaths = np.array(dynamic_mask_filepaths) self.sky_mask_filepaths = np.array(sky_mask_filepaths) self.feat_filepaths = np.array(feat_filepaths) def load_calibrations(self): """ Load the camera intrinsics, extrinsics, timestamps, etc. Compute the camera-to-world matrices, ego-to-world matrices, etc. """ # to store per-camera intrinsics and extrinsics _intrinsics = [] cam_to_egos = [] for i in range(self.num_cams): # load camera intrinsics # 1d Array of [f_u, f_v, c_u, c_v, k{1, 2}, p{1, 2}, k{3}]. # ====!! we did not use distortion parameters for simplicity !!==== # to be improved!! intrinsic = np.loadtxt( os.path.join(self.data_path, "intrinsics", f"{i}.txt") ) fx, fy, cx, cy = intrinsic[0], intrinsic[1], intrinsic[2], intrinsic[3] # scale intrinsics w.r.t. load size fx, fy = ( fx * self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[i][1], fy * self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[i][0], ) cx, cy = ( cx * self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[i][1], cy * self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[i][0], ) intrinsic = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]) _intrinsics.append(intrinsic) # load camera extrinsics cam_to_ego = np.loadtxt( os.path.join(self.data_path, "extrinsics", f"{i}.txt") ) # because we use opencv coordinate system to generate camera rays, # we need a transformation matrix to convert rays from opencv coordinate # system to waymo coordinate system. # opencv coordinate system: x right, y down, z front # waymo coordinate system: x front, y left, z up cam_to_egos.append(cam_to_ego @ self.OPENCV2DATASET) # compute per-image poses and intrinsics cam_to_worlds, ego_to_worlds = [], [] intrinsics, cam_ids = [], [] # ===!
for waymo, we simplify timestamps as the time indices timestamps, timesteps = [], [] # we transform the camera poses w.r.t. the first timestep to make the translation vector of # the first ego pose as the origin of the world coordinate system. ego_to_world_start = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{self.start_timestep:03d}.txt") ) for t in range(self.start_timestep, self.end_timestep): ego_to_world_current = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{t:03d}.txt") ) # compute ego_to_world transformation ego_to_world = np.linalg.inv(ego_to_world_start) @ ego_to_world_current ego_to_worlds.append(ego_to_world) for cam_id in self.camera_list: cam_ids.append(cam_id) # transformation: # (opencv_cam -> waymo_cam -> waymo_ego_vehicle) -> current_world cam2world = ego_to_world @ cam_to_egos[cam_id] cam_to_worlds.append(cam2world) intrinsics.append(_intrinsics[cam_id]) # ===! we use time indices as the timestamp for waymo dataset for simplicity # ===! we can use the actual timestamps if needed # to be improved timestamps.append(t - self.start_timestep) timesteps.append(t - self.start_timestep) self.intrinsics = torch.from_numpy(np.stack(intrinsics, axis=0)).float() self.cam_to_worlds = torch.from_numpy(np.stack(cam_to_worlds, axis=0)).float() self.ego_to_worlds = torch.from_numpy(np.stack(ego_to_worlds, axis=0)).float() self.cam_ids = torch.from_numpy(np.stack(cam_ids, axis=0)).long() # the underscore here is important. self._timestamps = torch.from_numpy(np.stack(timestamps, axis=0)).float() self._timesteps = torch.from_numpy(np.stack(timesteps, axis=0)).long() class WaymoLiDARSource(SceneLidarSource): def __init__( self, lidar_data_config: OmegaConf, data_path: str, start_timestep: int, end_timestep: int, device: torch.device = torch.device("cpu"), ): super().__init__(lidar_data_config, device=device) self.data_path = data_path self.start_timestep = start_timestep self.end_timestep = end_timestep self.create_all_filelist() self.load_data() def create_all_filelist(self): """ Create a list of all the files in the dataset. e.g., a list of all the lidar scans in the dataset. """ lidar_filepaths = [] for t in range(self.start_timestep, self.end_timestep): lidar_filepaths.append( os.path.join(self.data_path, "lidar", f"{t:03d}.bin") ) self.lidar_filepaths = np.array(lidar_filepaths) def load_calibrations(self): """ Load the calibration files of the dataset. e.g., lidar to world transformation matrices. """ # Note that in the Waymo Open Dataset, the lidar coordinate system is the same # as the vehicle coordinate system lidar_to_worlds = [] # we transform the poses w.r.t. the first timestep to make the origin of the # first ego pose as the origin of the world coordinate system. ego_to_world_start = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{self.start_timestep:03d}.txt") ) for t in range(self.start_timestep, self.end_timestep): ego_to_world_current = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{t:03d}.txt") ) # compute ego_to_world transformation lidar_to_world = np.linalg.inv(ego_to_world_start) @ ego_to_world_current lidar_to_worlds.append(lidar_to_world) self.lidar_to_worlds = torch.from_numpy( np.stack(lidar_to_worlds, axis=0) ).float() def load_lidar(self): """ Load the lidar data of the dataset from the filelist.
""" origins, directions, ranges, laser_ids = [], [], [], [] # flow/ground info are used for evaluation only flows, flow_classes, grounds = [], [], [] # in waymo, we simplify timestamps as the time indices timestamps, timesteps = [], [] accumulated_num_original_rays = 0 accumulated_num_rays = 0 for t in trange( 0, len(self.lidar_filepaths), desc="Loading lidar", dynamic_ncols=True ): # each lidar_info contains an Nx14 array # from left to right: # origins: 3d, points: 3d, flows: 3d, flow_class: 1d, # ground_labels: 1d, intensities: 1d, elongations: 1d, laser_ids: 1d lidar_info = np.memmap( self.lidar_filepaths[t], dtype=np.float32, mode="r", ).reshape(-1, 14) original_length = len(lidar_info) accumulated_num_original_rays += original_length # select lidar points based on the laser id if self.data_cfg.only_use_top_lidar: # laser_ids: 0: TOP, 1: FRONT, 2: SIDE_LEFT, 3: SIDE_RIGHT, 4: REAR lidar_info = lidar_info[lidar_info[:, 13] == 0] lidar_origins = torch.from_numpy(lidar_info[:, :3]).float() lidar_points = torch.from_numpy(lidar_info[:, 3:6]).float() lidar_ids = torch.from_numpy(lidar_info[:, 13]).float() lidar_flows = torch.from_numpy(lidar_info[:, 6:9]).float() lidar_flow_classes = torch.from_numpy(lidar_info[:, 9]).long() ground_labels = torch.from_numpy(lidar_info[:, 10]).long() # we don't collect intensities and elongations for now # select lidar points based on a truncated ego-forward-directional range # this is to make sure most of the lidar points are within the range of the camera valid_mask = torch.ones_like(lidar_origins[:, 0]).bool() if self.data_cfg.truncated_max_range is not None: valid_mask = lidar_points[:, 0] < self.data_cfg.truncated_max_range if self.data_cfg.truncated_min_range is not None: valid_mask = valid_mask & ( lidar_points[:, 0] > self.data_cfg.truncated_min_range ) lidar_origins = lidar_origins[valid_mask] lidar_points = lidar_points[valid_mask] lidar_ids = lidar_ids[valid_mask] lidar_flows = lidar_flows[valid_mask] lidar_flow_classes = lidar_flow_classes[valid_mask] ground_labels = ground_labels[valid_mask] # transform lidar points from lidar coordinate system to world coordinate system lidar_origins = ( self.lidar_to_worlds[t][:3, :3] @ lidar_origins.T + self.lidar_to_worlds[t][:3, 3:4] ).T lidar_points = ( self.lidar_to_worlds[t][:3, :3] @ lidar_points.T + self.lidar_to_worlds[t][:3, 3:4] ).T # scene flows are in the lidar coordinate system, so we need to rotate them lidar_flows = (self.lidar_to_worlds[t][:3, :3] @ lidar_flows.T).T # compute lidar directions lidar_directions = lidar_points - lidar_origins lidar_ranges = torch.norm(lidar_directions, dim=-1, keepdim=True) lidar_directions = lidar_directions / lidar_ranges # we use time indices as the timestamp for waymo dataset lidar_timestamp = torch.ones_like(lidar_ranges).squeeze(-1) * t accumulated_num_rays += len(lidar_ranges) origins.append(lidar_origins) directions.append(lidar_directions) ranges.append(lidar_ranges) laser_ids.append(lidar_ids) flows.append(lidar_flows) flow_classes.append(lidar_flow_classes) grounds.append(ground_labels) # we use time indices as the timestamp for waymo dataset timestamps.append(lidar_timestamp) timesteps.append(lidar_timestamp) logger.info( f"Number of lidar rays: {accumulated_num_rays} " f"({accumulated_num_rays / accumulated_num_original_rays * 100:.2f}% of " f"{accumulated_num_original_rays} original rays)" ) logger.info("Filter condition:") logger.info(f" only_use_top_lidar: {self.data_cfg.only_use_top_lidar}") logger.info(f" truncated_max_range: 
{self.data_cfg.truncated_max_range}") logger.info(f" truncated_min_range: {self.data_cfg.truncated_min_range}") self.origins = torch.cat(origins, dim=0) self.directions = torch.cat(directions, dim=0) self.ranges = torch.cat(ranges, dim=0) self.laser_ids = torch.cat(laser_ids, dim=0) # because the flows here are velocities (m/s), and the fps of the lidar is 10, # we need to divide the velocities by 10 to get the displacements/flows # between two consecutive lidar scans self.flows = torch.cat(flows, dim=0) / 10.0 self.flow_classes = torch.cat(flow_classes, dim=0) self.grounds = torch.cat(grounds, dim=0).bool() # the underscore here is important. self._timestamps = torch.cat(timestamps, dim=0) self._timesteps = torch.cat(timesteps, dim=0) def to(self, device: torch.device): super().to(device) self.flows = self.flows.to(device) self.flow_classes = self.flow_classes.to(device) self.grounds = self.grounds.to(self.device) def get_render_rays(self, time_idx: int) -> Dict[str, Tensor]: """ Override the base class function to add more information to the render rays. """ return { "lidar_origins": self.origins[self.timesteps == time_idx], "lidar_viewdirs": self.directions[self.timesteps == time_idx], "lidar_ranges": self.ranges[self.timesteps == time_idx], # normalized timestamps between 0 and 1 "lidar_normed_timestamps": self.normalized_timestamps[ self.timesteps == time_idx ], "lidar_flow": self.flows[self.timesteps == time_idx], "lidar_flow_class": self.flow_classes[self.timesteps == time_idx], "lidar_ground": self.grounds[self.timesteps == time_idx], } class WaymoDataset(SceneDataset): dataset: str = "waymo" def __init__( self, data_cfg: OmegaConf, ) -> None: super().__init__(data_cfg) self.data_path = os.path.join(self.data_cfg.data_root, f"{self.scene_idx:03d}") assert self.data_cfg.dataset == "waymo" assert os.path.exists(self.data_path), f"{self.data_path} does not exist" # ---- find the number of synchronized frames ---- # if self.data_cfg.end_timestep == -1: num_files = len(os.listdir(os.path.join(self.data_path, "ego_pose"))) end_timestep = num_files - 1 else: end_timestep = self.data_cfg.end_timestep # to make sure the last timestep is included self.end_timestep = end_timestep + 1 self.start_timestep = self.data_cfg.start_timestep # ---- create data source ---- # self.pixel_source, self.lidar_source = self.build_data_source() self.aabb = self.get_aabb() # ---- define train and test indices ---- # # note that the timestamps of the pixel source and the lidar source are the same in waymo dataset ( self.train_timesteps, self.test_timesteps, self.train_indices, self.test_indices, ) = self.split_train_test() # ---- create split wrappers ---- # pixel_sets, lidar_sets = self.build_split_wrapper() self.train_pixel_set, self.test_pixel_set, self.full_pixel_set = pixel_sets self.train_lidar_set, self.test_lidar_set, self.full_lidar_set = lidar_sets def build_split_wrapper(self): """ Makes each data source as a Pytorch Dataset """ train_pixel_set, test_pixel_set, full_pixel_set = None, None, None train_lidar_set, test_lidar_set, full_lidar_set = None, None, None if self.pixel_source is not None: train_pixel_set = SplitWrapper( datasource=self.pixel_source, # train_indices are img indices, so the length is num_cams * num_train_timesteps split_indices=self.train_indices, split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_pixel_set = SplitWrapper( datasource=self.pixel_source, # cover all the images split_indices=np.arange(self.pixel_source.num_imgs).tolist(), split="full",
ray_batch_size=self.data_cfg.ray_batch_size, ) if len(self.test_indices) > 0: test_pixel_set = SplitWrapper( datasource=self.pixel_source, # test_indices are img indices, so the length is num_cams * num_test_timesteps split_indices=self.test_indices, split="test", ray_batch_size=self.data_cfg.ray_batch_size, ) if self.lidar_source is not None: train_lidar_set = SplitWrapper( datasource=self.lidar_source, # train_timesteps are lidar indices, so the length is num_train_timesteps split_indices=self.train_timesteps, split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_lidar_set = SplitWrapper( datasource=self.lidar_source, # cover all the lidar scans split_indices=np.arange(self.lidar_source.num_timesteps), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) if len(self.test_indices) > 0: test_lidar_set = SplitWrapper( datasource=self.lidar_source, # test_timesteps are lidar indices, so the length is num_test_timesteps split_indices=self.test_timesteps, split="test", ray_batch_size=self.data_cfg.ray_batch_size, ) pixel_set = (train_pixel_set, test_pixel_set, full_pixel_set) lidar_set = (train_lidar_set, test_lidar_set, full_lidar_set) return pixel_set, lidar_set def build_data_source(self): """ Create the data source for the dataset. """ pixel_source, lidar_source = None, None # to collect all timestamps from pixel source and lidar source all_timestamps = [] # ---- create pixel source ---- # load_pixel = ( self.data_cfg.pixel_source.load_rgb or self.data_cfg.pixel_source.load_sky_mask or self.data_cfg.pixel_source.load_dynamic_mask or self.data_cfg.pixel_source.load_feature ) if load_pixel: pixel_source = WaymoPixelSource( self.data_cfg.pixel_source, self.data_path, self.start_timestep, self.end_timestep, device=self.device, ) pixel_source.to(self.device) # collect img timestamps all_timestamps.append(pixel_source.timestamps) # ---- create lidar source ---- # if self.data_cfg.lidar_source.load_lidar: lidar_source = WaymoLiDARSource( self.data_cfg.lidar_source, self.data_path, self.start_timestep, self.end_timestep, device=self.device, ) lidar_source.to(self.device) # collect lidar timestamps all_timestamps.append(lidar_source.timestamps) assert len(all_timestamps) > 0, "No data source is loaded" all_timestamps = torch.cat(all_timestamps, dim=0) # normalize the timestamps jointly for pixel source and lidar source # so that the normalized timestamps are between 0 and 1 all_timestamps = (all_timestamps - all_timestamps.min()) / ( all_timestamps.max() - all_timestamps.min() ) if pixel_source is not None: pixel_source.register_normalized_timestamps( all_timestamps[: len(pixel_source.timestamps)] ) if lidar_source is not None: lidar_source.register_normalized_timestamps( all_timestamps[-len(lidar_source.timestamps) :] ) return pixel_source, lidar_source def split_train_test(self): if self.data_cfg.pixel_source.test_image_stride != 0: test_timesteps = np.arange( # it makes no sense to have test timesteps before the start timestep self.data_cfg.pixel_source.test_image_stride, self.num_img_timesteps, self.data_cfg.pixel_source.test_image_stride, ) else: test_timesteps = [] train_timesteps = np.array( [i for i in range(self.num_img_timesteps) if i not in test_timesteps] ) logger.info( f"Train timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[train_timesteps]}" ) logger.info( f"Test timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[test_timesteps]}" ) # propagate the train and test timesteps to the train and test indices train_indices, test_indices = [], 
[] for t in range(self.num_img_timesteps): if t in train_timesteps: for cam in range(self.pixel_source.num_cams): train_indices.append(t * self.pixel_source.num_cams + cam) elif t in test_timesteps: for cam in range(self.pixel_source.num_cams): test_indices.append(t * self.pixel_source.num_cams + cam) logger.info(f"Number of train indices: {len(train_indices)}") logger.info(f"Train indices: {train_indices}") logger.info(f"Number of test indices: {len(test_indices)}") logger.info(f"Test indices: {test_indices}") # Again, training and testing indices are indices into the full dataset # train_indices are img indices, so the length is num_cams * num_train_timesteps # but train_timesteps are timesteps, so the length is num_train_timesteps (len(unique_train_timestamps)) return train_timesteps, test_timesteps, train_indices, test_indices def get_occ(self, index: int): """ Get the Occ3D data of the scene at the given index. """ # from: https://github.com/Tsinghua-MARS-Lab/Occ3D#occ3d-waymo # The dataset contains 15 classes. The definition of classes from 0 to 14 is # 0: TYPE_GENERALOBJECT, 1: TYPE_VEHICLE, 2: TYPE_PEDESTRIAN, 3: TYPE_SIGN, # 4: TYPE_CYCLIST, 5: TYPE_TRAFFIC_LIGHT, 6: TYPE_POLE, 7: TYPE_CONSTRUCTION_CONE, # 8: TYPE_BICYCLE, 9: TYPE_MOTORCYCLE, 10: TYPE_BUILDING, 11: TYPE_VEGETATION, # 12: TYPE_TREE_TRUNK, 13: TYPE_ROAD, 14: TYPE_WALKABLE. self.label_mapping = { 0: "general_obj", 1: "vehicle", 2: "pedestrian", 3: "sign", 4: "cyclist", 5: "traffic_light", 6: "pole", 7: "construction_cone", 8: "bicyle", 9: "motorcycle", 10: "building", 11: "vegetation", 12: "tree_trunck", 13: "road", 14: "walkable", } if self.data_cfg.occ_source.voxel_size == 0.4: occ_path = f"{self.data_path}/occ3d/{index:03d}_04.npz" occupancy_resolution = [100, 200, 16] occupancy_aabb_min = [0, -40, -1] occupancy_aabb_max = [40, 40, 5.4] elif self.data_cfg.occ_source.voxel_size == 0.1: occ_path = f"{self.data_path}/occ3d/{index:03d}.npz" occupancy_resolution = [800, 1600, 64] occupancy_aabb_min = [0, -80, -5] occupancy_aabb_max = [80, 80, 7.8] else: raise NotImplementedError( f"voxel size {self.data_cfg.occ_source.voxel_size} not supported" ) if not os.path.exists(occ_path): raise FileNotFoundError(f"{occ_path} does not exist") # loading the occupancy grid gt_occ = np.load(occ_path) # np.unique(gt_occ['voxel_label']): array([ 0, 1, 2, 3, 6, 8, 9, 10, 11, 12, 13, 14, 23], dtype=uint8) semantic_labels = gt_occ["voxel_label"] # final voxel_state will indicate what voxels are visible from the camera mask_camera = gt_occ["final_voxel_state"] # we don't have back-cameras, so we remove the back part of the grid semantic_labels = semantic_labels[len(semantic_labels) // 2 :, :, :] mask_camera = mask_camera[len(mask_camera) // 2 :, :, :] # semantic_labels == 23 means the free space, i.e. 
empty semantic_labels[semantic_labels == 23] = 15 # mask_camera == 0 means invisible from the camera semantic_labels[mask_camera == 0] = 15 semantic_labels = ( torch.from_numpy(semantic_labels.copy()).long().to(self.device) ) # compute the coordinates and labels of the occupied voxels occ_coords = torch.nonzero(semantic_labels != 15).float() occ_labels = semantic_labels[semantic_labels != 15] # transform the coordinates from voxel space to world space ego_occ_coords = voxel_coords_to_world_coords( occupancy_aabb_min, occupancy_aabb_max, occupancy_resolution, points=occ_coords, ).to(self.device) world_occ_coords = ( self.lidar_source.lidar_to_worlds[index][:3, :3] @ ego_occ_coords.T + self.lidar_source.lidar_to_worlds[index][:3, 3:4] ).T normed_timestamps = ( torch.ones_like(world_occ_coords[..., 0]) * index / (self.lidar_source.num_timesteps + 1e-6 - 1) ) return world_occ_coords, occ_labels, normed_timestamps def get_valid_lidar_mask(self, lidar_timestep: int, data_dict: dict): # filter out the lidar points that are not visible from the camera lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) valid_mask = torch.zeros_like(lidar_points[:, 0]).bool() # project lidar points to the image plane for i in range(self.pixel_source.num_cams): img_idx = lidar_timestep * self.pixel_source.num_cams + i intrinsic_4x4 = torch.nn.functional.pad( self.pixel_source.intrinsics[img_idx], (0, 1, 0, 1) ) intrinsic_4x4[3, 3] = 1.0 lidar2img = ( intrinsic_4x4 @ self.pixel_source.cam_to_worlds[img_idx].inverse() ) projected_points = ( lidar2img[:3, :3] @ lidar_points.T + lidar2img[:3, 3:4] ).T depth = projected_points[:, 2] cam_points = projected_points[:, :2] / (depth.unsqueeze(-1) + 1e-6) current_valid_mask = ( (cam_points[:, 0] >= 0) & (cam_points[:, 0] < self.pixel_source.WIDTH) & (cam_points[:, 1] >= 0) & (cam_points[:, 1] < self.pixel_source.HEIGHT) & (depth > 0) ) valid_mask = valid_mask | current_valid_mask return valid_mask def save_videos(self, video_dict: dict, **kwargs): """ Save the a video of the data. """ return save_videos( render_results=video_dict, save_pth=kwargs["save_pth"], num_timestamps=kwargs["num_timestamps"], keys=kwargs["keys"], num_cams=kwargs["num_cams"], fps=kwargs["fps"], verbose=kwargs["verbose"], save_seperate_video=kwargs["save_seperate_video"], ) def render_data_videos( self, save_pth: str, split: str = "full", fps: int = 24, verbose=True, ): """ Render a video of data. 
""" pixel_dataset, lidar_dataset = None, None if split == "full": if self.pixel_source is not None: pixel_dataset = self.full_pixel_set if self.lidar_source is not None: lidar_dataset = self.full_lidar_set elif split == "train": if self.pixel_source is not None: pixel_dataset = self.train_pixel_set if self.lidar_source is not None: lidar_dataset = self.train_lidar_set elif split == "test": if self.pixel_source is not None: pixel_dataset = self.test_pixel_set if self.lidar_source is not None: lidar_dataset = self.test_lidar_set else: raise NotImplementedError(f"Split {split} not supported") # pixel source rgb_imgs, dynamic_objects = [], [] sky_masks, feature_pca_colors = [], [] lidar_depths, flow_colors = [], [] for i in trange( len(pixel_dataset), desc="Rendering data videos", dynamic_ncols=True ): data_dict = pixel_dataset[i] if "pixels" in data_dict: rgb_imgs.append(data_dict["pixels"].cpu().numpy()) if "dynamic_masks" in data_dict: dynamic_objects.append( (data_dict["dynamic_masks"].unsqueeze(-1) * data_dict["pixels"]) .cpu() .numpy() ) if "sky_masks" in data_dict: sky_masks.append(data_dict["sky_masks"].cpu().numpy()) if "features" in data_dict: features = data_dict["features"] # use registered parameters to normalize the features for visualization features = features @ self.pixel_source.feat_dimension_reduction_mat features = (features - self.pixel_source.feat_color_min) / ( self.pixel_source.feat_color_max - self.pixel_source.feat_color_min ).clamp(0, 1) feature_pca_colors.append(features.cpu().numpy()) if lidar_dataset is not None: # to deal with asynchronized data # find the closest lidar scan to the current image in time closest_lidar_idx = self.lidar_source.find_closest_timestep( data_dict["normed_timestamps"].flatten()[0] ) data_dict = lidar_dataset[closest_lidar_idx] lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) # project lidar points to the image plane # TODO: consider making this a function intrinsic_4x4 = torch.nn.functional.pad( self.pixel_source.intrinsics[i], (0, 1, 0, 1) ) intrinsic_4x4[3, 3] = 1.0 lidar2img = intrinsic_4x4 @ self.pixel_source.cam_to_worlds[i].inverse() lidar_points = ( lidar2img[:3, :3] @ lidar_points.T + lidar2img[:3, 3:4] ).T depth = lidar_points[:, 2] cam_points = lidar_points[:, :2] / (depth.unsqueeze(-1) + 1e-6) valid_mask = ( (cam_points[:, 0] >= 0) & (cam_points[:, 0] < self.pixel_source.WIDTH) & (cam_points[:, 1] >= 0) & (cam_points[:, 1] < self.pixel_source.HEIGHT) & (depth > 0) ) depth = depth[valid_mask] _cam_points = cam_points[valid_mask] depth_map = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH ).to(self.device) depth_map[ _cam_points[:, 1].long(), _cam_points[:, 0].long() ] = depth.squeeze(-1) depth_img = depth_map.cpu().numpy() depth_img = depth_visualizer(depth_img, depth_img > 0) mask = (depth_map.unsqueeze(-1) > 0).cpu().numpy() # show the depth map on top of the rgb image image = rgb_imgs[-1] * (1 - mask) + depth_img * mask lidar_depths.append(image) # project lidar flows to the image plane flow_img = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH, 3 ).to(self.device) # to examine whether the ground labels are correct valid_mask = valid_mask & (~data_dict["lidar_ground"]) _cam_points = cam_points[valid_mask] # final color: # white if no flow, black if ground, and flow color otherwise
flow_color = scene_flow_to_rgb(
5
2023-10-11 20:56:27+00:00
24k
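A minimal, self-contained sketch of the ray construction used in the Waymo lidar source above: directions come from subtracting the sensor origins from the hit points, and ranges are the per-ray norms used to normalize them. Tensor shapes and the helper name are our assumptions; the real class additionally carries flows, ground labels, and timestamps.

import torch

def lidar_points_to_rays(origins: torch.Tensor, points: torch.Tensor):
    # origins, points: (N, 3) tensors in the same (world) coordinate frame
    directions = points - origins                          # un-normalized rays
    ranges = torch.norm(directions, dim=-1, keepdim=True)  # (N, 1) hit distances
    directions = directions / ranges                       # unit-length view dirs
    return directions, ranges

# toy usage with made-up coordinates
origins = torch.zeros(4, 3)
points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0], [1.0, 1.0, 1.0]])
dirs, rng = lidar_points_to_rays(origins, points)
assert torch.allclose(torch.norm(dirs, dim=-1), torch.ones(4))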
alibaba-damo-academy/FunCodec
funcodec/models/encoder/transformer_encoder.py
[ { "identifier": "AbsEncoder", "path": "funcodec/models/encoder/abs_encoder.py", "snippet": "class AbsEncoder(torch.nn.Module, ABC):\n @abstractmethod\n def output_size(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def forward(\n self,\n xs_pad: torch.Te...
from typing import List from typing import Optional from typing import Tuple from torch import nn from funcodec.models.encoder.abs_encoder import AbsEncoder from funcodec.modules.attention import ( MultiHeadedAttention, RelPositionMultiHeadedAttention, # noqa: H301 LegacyRelPositionMultiHeadedAttention, # noqa: H301 ) from funcodec.modules.layer_norm import LayerNorm from funcodec.modules.multi_layer_conv import Conv1dLinear from funcodec.modules.multi_layer_conv import MultiLayeredConv1d from funcodec.modules.nets_utils import make_pad_mask from funcodec.modules.embedding import ( PositionalEncoding, # noqa: H301 ScaledPositionalEncoding, # noqa: H301 RelPositionalEncoding, # noqa: H301 LegacyRelPositionalEncoding, # noqa: H301 ) from funcodec.modules.positionwise_feed_forward import ( PositionwiseFeedForward, # noqa: H301 ) from funcodec.modules.repeat import repeat from funcodec.modules.nets_utils import rename_state_dict from funcodec.modules.dynamic_conv import DynamicConvolution from funcodec.modules.dynamic_conv2d import DynamicConvolution2D from funcodec.modules.lightconv import LightweightConvolution from funcodec.modules.lightconv2d import LightweightConvolution2D from funcodec.modules.subsampling import Conv2dSubsampling from funcodec.modules.subsampling import Conv2dSubsampling2 from funcodec.modules.subsampling import Conv2dSubsampling6 from funcodec.modules.subsampling import Conv2dSubsampling8 from funcodec.modules.subsampling import TooShortUttError from funcodec.modules.subsampling import check_short_utt import torch import logging
18,455
if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate) elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate) elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer is None: if input_size == output_size: self.embed = None else: self.embed = torch.nn.Linear(input_size, output_size) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( 
output_size,
# Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Transformer encoder definition.""" class EncoderLayer(nn.Module): """Encoder layer module. Args: size (int): Input dimension. self_attn (torch.nn.Module): Self-attention module instance. `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance can be used as the argument. feed_forward (torch.nn.Module): Feed-forward module instance. `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance can be used as the argument. dropout_rate (float): Dropout rate. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) stochastic_depth_rate (float): Proability to skip this layer. During training, the layer may skip residual computation and return input as-is with given probability. """ def __init__( self, size, self_attn, feed_forward, dropout_rate, normalize_before=True, concat_after=False, stochastic_depth_rate=0.0, ): """Construct an EncoderLayer object.""" super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.norm1 = LayerNorm(size) self.norm2 = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) self.stochastic_depth_rate = stochastic_depth_rate def forward(self, x, mask, cache=None): """Compute encoded features. Args: x_input (torch.Tensor): Input tensor (#batch, time, size). mask (torch.Tensor): Mask tensor for the input (#batch, time). cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). Returns: torch.Tensor: Output tensor (#batch, time, size). torch.Tensor: Mask tensor (#batch, time). """ if isinstance(x, tuple): x, pos_emb = x[0], x[1] else: x, pos_emb = x, None skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. 
Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate) elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate) elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer is None: if input_size == output_size: self.embed = None else: self.embed = torch.nn.Linear(input_size, output_size) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( output_size,
MultiHeadedAttention(
1
2023-10-07 02:00:40+00:00
24k
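The EncoderLayer in the FunCodec row above applies stochastic depth: at training time the residual branch is skipped outright with probability stochastic_depth_rate, and otherwise scaled by 1 / (1 - p) so the expected output matches evaluation behaviour. A stand-alone sketch of just that mechanism (the wrapper class is illustrative, not part of FunCodec):

import torch
import torch.nn as nn

class StochasticDepthResidual(nn.Module):
    def __init__(self, sublayer: nn.Module, p: float = 0.1):
        super().__init__()
        self.sublayer = sublayer
        self.p = p  # probability of skipping the sub-layer during training

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.training and self.p > 0 and torch.rand(1).item() < self.p:
            return x  # skip the residual branch for this forward pass
        # rescale so the expected output matches eval time, mirroring
        # stoch_layer_coeff = 1 / (1 - stochastic_depth_rate) above
        coeff = 1.0 / (1.0 - self.p) if self.training and self.p > 0 else 1.0
        return x + coeff * self.sublayer(x)

layer = StochasticDepthResidual(nn.Linear(8, 8), p=0.5)
out = layer(torch.randn(2, 8))  # (2, 8)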
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2.py
[ { "identifier": "get_moreDA_augmentation", "path": "nn_transunet/data/data_augmentation_moreDA.py", "snippet": "def get_moreDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1,\n se...
from collections import OrderedDict from typing import Tuple from ..data.data_augmentation_moreDA import get_moreDA_augmentation from ..trainer.loss_functions import MultipleOutputLoss2 from ..trainer.network_trainer import maybe_to_torch, to_cuda from ..trainer.nnUNetTrainer import nnUNetTrainer from ..networks.nnunet_model import Generic_UNet from ..data.default_data_augmentation import default_2D_augmentation_params, \ get_patch_size, default_3D_augmentation_params from ..data.dataset_loading import unpack_dataset from sklearn.model_selection import KFold from torch.cuda.amp import autocast from batchgenerators.utilities.file_and_folder_operations import * from torch import nn from loss_functions import DC_and_CE_loss from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp import numpy as np import torch import torch.nn.functional as F
16,795
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. softmax_helper = lambda x: F.softmax(x, 1) def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs)**exponent class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class InitWeights_XavierUniform(object): def __init__(self, gain=1): self.gain = gain def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.xavier_uniform_(module.weight, self.gain) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) if args is not None: self.input_size=input_size self.model = args.model self.resume = args.resume self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 1 gpu training self.initial_lr = args.initial_lr # 0.01 self.args = args if self.disable_ds: print("disable_ds") # print("not runnable for this feature! current nnunetV2 (w/o DDP) only support deep supervision version") # raise NotImplementedError else: print("runnning DDP, inheriting nnUNetTrainerV2") self.save_every = 1 # prev 50 # self.max_num_epochs = 1000 # self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) else: # now wrap the loss
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. softmax_helper = lambda x: F.softmax(x, 1) def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs)**exponent class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class InitWeights_XavierUniform(object): def __init__(self, gain=1): self.gain = gain def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.xavier_uniform_(module.weight, self.gain) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) if args is not None: self.input_size=input_size self.model = args.model self.resume = args.resume self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 1 gpu training self.initial_lr = args.initial_lr # 0.01 self.args = args if self.disable_ds: print("disable_ds") # print("not runnable for this feature! 
current nnunetV2 (w/o DDP) only support deep supervision version") # raise NotImplementedError else: print("runnning DDP, inheriting nnUNetTrainerV2") self.save_every = 1 # prev 50 # self.max_num_epochs = 1000 # self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) else: # now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
1
2023-10-11 05:19:25+00:00
24k
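nnUNetTrainerV2.initialize in the row above derives deep-supervision loss weights by halving the weight at each coarser resolution, masking out the lowest-resolution head, and renormalizing to sum to 1. The same arithmetic isolated as a helper (the function name is ours, not nnU-Net's):

import numpy as np

def deep_supervision_weights(net_numpool: int) -> np.ndarray:
    # one weight per decoder output, highest resolution first
    weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
    # keep all heads except the lowest-resolution one
    mask = np.array([True] + [i < net_numpool - 1 for i in range(1, net_numpool)])
    weights[~mask] = 0
    return weights / weights.sum()

print(deep_supervision_weights(5))
# -> [0.53333333 0.26666667 0.13333333 0.06666667 0.        ]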
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n ...
import os import torch import torch.nn as nn import numpy as np import utils.utils as utils import random import time import pandas as pd import copy import argparse from utils import genotypes as gt from models.search_cnn import SearchCNNController from models.search_cnn_PC import SearchCNNControllerPC from task_optimizer.darts import Darts, Architect from task_optimizer.darts import train as d_train from tqdm import tqdm
15,692
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation")
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation")
model = SearchCNNControllerPC(
2
2023-10-08 02:42:27+00:00
24k
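_init_alpha_normalizer in the On-NAS row above only packages t_max, t_min, and temp_anneal_mode into a dict; the schedule that consumes these fields lies outside this crop. Purely as an assumption about how they might be used, a temperature schedule could look like:

def annealed_temperature(curr_step: float, max_steps: float,
                         t_max: float, t_min: float,
                         mode: str = "linear") -> float:
    # Hypothetical consumer of the normalizer dict; neither variant is
    # confirmed by the crop above.
    frac = min(curr_step / max_steps, 1.0) if max_steps > 0 else 1.0
    if mode == "linear":
        return t_max + (t_min - t_max) * frac   # straight-line decay
    if mode == "exp":
        return t_max * (t_min / t_max) ** frac  # geometric decay
    raise ValueError(f"unknown temp_anneal_mode: {mode}")

assert annealed_temperature(0, 100, 1.0, 0.1) == 1.0
assert abs(annealed_temperature(100, 100, 1.0, 0.1) - 0.1) < 1e-9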
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def ...
from PIL import Image from io import BytesIO from .io_utils import IO, DefaultIO, OSS from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from mplug_owl.configuration_mplug_owl import MplugOwlConfig from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer from transformers import GenerationConfig from .model_utils import post_process_output, Stream, Iteratorize from pathlib import Path from pipeline.data_utils.processors.builder import build_processors from pipeline.data_utils.processors import * from transformers.models.llama.tokenization_llama import LlamaTokenizer from icecream import ic import torch import gradio as gr import logging import sys import os import json import requests import datetime import uuid import base64 import time import transformers
14,657
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model)
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model)
self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer)
11
2023-10-08 06:29:02+00:00
24k
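The commented-out ImageProcessor in the UReader row above splits the prompt on '<image>' and widens each slot to one placeholder token per visual patch (or keeps a single token when patch positions are fused before the v2t module, the 'pre' case). A runnable restatement of that interleaving (function and argument names are ours):

from typing import List

def interleave_image_tokens(prompt: str, patches_per_image: List[int],
                            pre_fusion: bool = False) -> str:
    segments = prompt.split("<image>")
    assert len(segments) == len(patches_per_image) + 1, "one slot per image"
    text = segments[0]
    for n_patches, tail in zip(patches_per_image, segments[1:]):
        # 'pre' fusion leaves one token per image; otherwise one per patch
        text += "<image>" * (1 if pre_fusion else n_patches) + tail
    return text

print(interleave_image_tokens("Describe <image> briefly.", [3]))
# -> 'Describe <image><image><image> briefly.'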
LeapLabTHU/Rank-DETR
projects/dino/configs/models/dino_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of th...
import copy import torch.nn as nn from detectron2.modeling.backbone import ResNet, BasicStem from detectron2.layers import ShapeSpec from detectron2.config import LazyCall as L from detrex.modeling.matcher import HungarianMatcher from detrex.modeling.neck import ChannelMapper from detrex.layers import PositionEmbeddingSine from projects.dino.modeling import ( DINO, DINOTransformerEncoder, DINOTransformerDecoder, DINOTransformer, DINOCriterion, )
15,951
model = L(DINO)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DINOTransformer)( encoder=L(DINOTransformerEncoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, post_norm=False, num_feature_levels="${..num_feature_levels}", use_checkpoint=False ), decoder=L(DINOTransformerDecoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, return_intermediate=True, num_feature_levels="${..num_feature_levels}", use_checkpoint=False, ), num_feature_levels=4, two_stage_num_proposals="${..num_queries}", ), embed_dim=256, num_classes=80, num_queries=900, aux_loss=True,
model = L(DINO)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DINOTransformer)( encoder=L(DINOTransformerEncoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, post_norm=False, num_feature_levels="${..num_feature_levels}", use_checkpoint=False ), decoder=L(DINOTransformerDecoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, return_intermediate=True, num_feature_levels="${..num_feature_levels}", use_checkpoint=False, ), num_feature_levels=4, two_stage_num_proposals="${..num_queries}", ), embed_dim=256, num_classes=80, num_queries=900, aux_loss=True,
criterion=L(DINOCriterion)(
7
2023-10-12 03:02:25+00:00
24k
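The DINO model in the Rank-DETR row above is a detectron2 lazy config: L(cls)(**kwargs) records the target and its arguments instead of calling them, and strings such as "${..num_queries}" are omegaconf interpolations resolved relative to the node. A minimal sketch of building and materializing such a config, assuming detectron2's LazyCall/instantiate API:

import torch.nn as nn
from detectron2.config import LazyCall as L, instantiate

# Record the call without executing it; the result behaves like a dict.
linear_cfg = L(nn.Linear)(in_features=256, out_features=80)

# Hyper-parameters can be edited as plain config fields before building.
linear_cfg.out_features = 91
head = instantiate(linear_cfg)  # -> nn.Linear(256, 91)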
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying ...
import os import random import torchaudio import typing as tp import numpy as np import torch import librosa import subprocess import math import allin1 import pytsmod as tsm import shutil from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf from audiocraft.modules.btc.btc_model import BTC_model from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord from demucs.audio import convert_audio from demucs.apply import apply_model
16,650
default=1.0, ), classifier_free_guidance: int = Input( description="Increases the influence of inputs on the output. Higher values produce lower-varience outputs that adhere more closely to inputs.", default=3, ), output_format: str = Input( description="Output format for generated audio.", default="wav", choices=["wav", "mp3"], ), seed: int = Input( description="Seed for random number generator. If `None` or `-1`, a random seed will be used.", default=None, ), # overlap: int = Input( # description="The length of overlapping part. Last `overlap` seconds of previous generation output audio is given to the next generation's audio prompt for continuation. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=5, le=15, ge=1 # ), # in_step_beat_sync: bool = Input( # description="If `True`, beat syncing is performed every generation step. In this case, audio prompting with EnCodec token will not be used, so that the audio quality might be degraded on and on along encoding-decoding sequences of the generation steps. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=False, # ), # amp_rate: float = Input( # description="Amplifying the output audio to prevent volume diminishing along generations. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=1.2, # ), ) -> Path: if prompt is None: raise ValueError("Must provide `prompt`.") if not music_input: raise ValueError("Must provide `music_input`.") if prompt is None: prompt = '' # tmp_path = 'tmp' # if os.path.isdir(tmp_path): # import shutil # shutil.rmtree(tmp_path) # os.mkdir(tmp_path) if os.path.isdir('demix'): shutil.rmtree('demix') if os.path.isdir('spec'): shutil.rmtree('spec') # Loading models if os.path.isfile(f'musicgen-{model_version}.th'): pass else: url = f"https://weights.replicate.delivery/default/musicgen-chord/musicgen-{model_version}.th" dest = f"/src/musicgen-{model_version}.th" subprocess.check_call(["pget", url, dest], close_fds=False) self.model = load_ckpt(f'/src/musicgen-{model_version}.th', self.device) self.model.lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True if 'stereo' in model_version: channel = 2 else: channel = 1 if large_chord_voca is False: # Switching Chord Prediction model to 25 vocab (smaller) self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.feature['large_voca']=False self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model['num_chords']=25 self.model.lm.condition_provider.conditioners['self_wav'].chroma.model_file='audiocraft/modules/btc/test/btc_model.pt' self.model.lm.condition_provider.conditioners['self_wav'].chroma.idx_to_chord = idx2chord loaded = torch.load('audiocraft/modules/btc/test/btc_model.pt') self.model.lm.condition_provider.conditioners['self_wav'].chroma.mean = loaded['mean'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.std = loaded['std'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.model = BTC_model(config=self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model).to(self.device) self.model.lm.condition_provider.conditioners['self_wav'].chroma.model.load_state_dict(loaded['model']) model = self.model model.lm.eval() if multi_band_diffusion and int(self.model.lm.cfg.transformer_lm.n_q) == 8: raise ValueError("Multi-band Diffusion only works with non-stereo models.") # in_step_beat_sync = in_step_beat_sync set_generation_params = lambda duration: 
model.set_generation_params( duration=duration, top_k=top_k, top_p=top_p, temperature=temperature, cfg_coef=classifier_free_guidance, ) model.lm.condition_provider.conditioners['self_wav'].chroma_coefficient = chroma_coefficient if not seed or seed == -1: seed = torch.seed() % 2 ** 32 - 1 set_all_seeds(seed) print(f"Using seed {seed}") # Music Structure Analysis music_input_analysis = allin1.analyze(music_input) music_input, sr = torchaudio.load(music_input) print("BPM : ", music_input_analysis.bpm) if not beat_sync_threshold or beat_sync_threshold == -1: if music_input_analysis.bpm is not None: beat_sync_threshold = 1.1/(int(music_input_analysis.bpm)/60) else: beat_sync_threshold = 0.75 if music_input_analysis.bpm is not None: prompt = prompt + f', bpm : {int(music_input_analysis.bpm)}' music_input = music_input[None] if music_input.dim() == 2 else music_input duration = music_input.shape[-1]/sr wav_sr = model.sample_rate vocal, background = self.separate_vocals(music_input, sr)
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu" self.mbd = MultiBandDiffusion.get_mbd_musicgen() def _load_model( self, model_path: str, cls: Optional[any] = None, load_args: Optional[dict] = {}, model_id: Optional[str] = None, device: Optional[str] = None, ) -> MusicGen: if device is None: device = self.device compression_model = load_compression_model( model_id, device=device, cache_dir=model_path ) lm = load_lm_model(model_id, device=device, cache_dir=model_path) return MusicGen(model_id, compression_model, lm) def predict( self, model_version: str = Input( description="Model type. Computations take longer when using `large` or `stereo` models.", default="stereo-chord", choices=["stereo-chord", "stereo-chord-large", "chord", "chord-large"] ), prompt: str = Input( description="A description of the music you want to generate.", default=None ), music_input: Path = Input( description="An audio file input for the remix.", default=None, ), multi_band_diffusion: bool = Input( description="If `True`, the EnCodec tokens will be decoded with MultiBand Diffusion. Not compatible with `stereo` models.", default=False, ), normalization_strategy: str = Input( description="Strategy for normalizing audio.", default="loudness", choices=["loudness", "clip", "peak", "rms"], ), # bpm_hard_sync: bool = Input( # description="If `True`, respective downbeats aren't analyzed, but are calculated from the bpm value detected and the first downbeat recognized instead. If the input audio has a changing bpm value, must be set `False`.", # default=True, # ), beat_sync_threshold: float = Input( description="When beat syncing, if the gap between generated downbeat timing and input audio downbeat timing is larger than `beat_sync_threshold`, consider the beats are not corresponding. If `None` or `-1`, `1.1/(bpm/60)` will be used as the value. 0.75 is a good value to set.", default=None, ), large_chord_voca: bool = Input( description="If `True`, more chords like 7th, diminished and etc are used. 
If `False` only 12 major and 12 minor chords are used.", default=True ), chroma_coefficient: float = Input( description="Coefficient value multiplied to multi-hot chord chroma.", default=1.0, ge=0.5, le=2.0 ), top_k: int = Input( description="Reduces sampling to the k most likely tokens.", default=250 ), top_p: float = Input( description="Reduces sampling to tokens with cumulative probability of p. When set to `0` (default), top_k sampling is used.", default=0.0, ), temperature: float = Input( description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity.", default=1.0, ), classifier_free_guidance: int = Input( description="Increases the influence of inputs on the output. Higher values produce lower-varience outputs that adhere more closely to inputs.", default=3, ), output_format: str = Input( description="Output format for generated audio.", default="wav", choices=["wav", "mp3"], ), seed: int = Input( description="Seed for random number generator. If `None` or `-1`, a random seed will be used.", default=None, ), # overlap: int = Input( # description="The length of overlapping part. Last `overlap` seconds of previous generation output audio is given to the next generation's audio prompt for continuation. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=5, le=15, ge=1 # ), # in_step_beat_sync: bool = Input( # description="If `True`, beat syncing is performed every generation step. In this case, audio prompting with EnCodec token will not be used, so that the audio quality might be degraded on and on along encoding-decoding sequences of the generation steps. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=False, # ), # amp_rate: float = Input( # description="Amplifying the output audio to prevent volume diminishing along generations. 
(This will be fixed with the optimal value and be hidden, when releasing.)", # default=1.2, # ), ) -> Path: if prompt is None: raise ValueError("Must provide `prompt`.") if not music_input: raise ValueError("Must provide `music_input`.") if prompt is None: prompt = '' # tmp_path = 'tmp' # if os.path.isdir(tmp_path): # import shutil # shutil.rmtree(tmp_path) # os.mkdir(tmp_path) if os.path.isdir('demix'): shutil.rmtree('demix') if os.path.isdir('spec'): shutil.rmtree('spec') # Loading models if os.path.isfile(f'musicgen-{model_version}.th'): pass else: url = f"https://weights.replicate.delivery/default/musicgen-chord/musicgen-{model_version}.th" dest = f"/src/musicgen-{model_version}.th" subprocess.check_call(["pget", url, dest], close_fds=False) self.model = load_ckpt(f'/src/musicgen-{model_version}.th', self.device) self.model.lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True if 'stereo' in model_version: channel = 2 else: channel = 1 if large_chord_voca is False: # Switching Chord Prediction model to 25 vocab (smaller) self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.feature['large_voca']=False self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model['num_chords']=25 self.model.lm.condition_provider.conditioners['self_wav'].chroma.model_file='audiocraft/modules/btc/test/btc_model.pt' self.model.lm.condition_provider.conditioners['self_wav'].chroma.idx_to_chord = idx2chord loaded = torch.load('audiocraft/modules/btc/test/btc_model.pt') self.model.lm.condition_provider.conditioners['self_wav'].chroma.mean = loaded['mean'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.std = loaded['std'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.model = BTC_model(config=self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model).to(self.device) self.model.lm.condition_provider.conditioners['self_wav'].chroma.model.load_state_dict(loaded['model']) model = self.model model.lm.eval() if multi_band_diffusion and int(self.model.lm.cfg.transformer_lm.n_q) == 8: raise ValueError("Multi-band Diffusion only works with non-stereo models.") # in_step_beat_sync = in_step_beat_sync set_generation_params = lambda duration: model.set_generation_params( duration=duration, top_k=top_k, top_p=top_p, temperature=temperature, cfg_coef=classifier_free_guidance, ) model.lm.condition_provider.conditioners['self_wav'].chroma_coefficient = chroma_coefficient if not seed or seed == -1: seed = torch.seed() % 2 ** 32 - 1 set_all_seeds(seed) print(f"Using seed {seed}") # Music Structure Analysis music_input_analysis = allin1.analyze(music_input) music_input, sr = torchaudio.load(music_input) print("BPM : ", music_input_analysis.bpm) if not beat_sync_threshold or beat_sync_threshold == -1: if music_input_analysis.bpm is not None: beat_sync_threshold = 1.1/(int(music_input_analysis.bpm)/60) else: beat_sync_threshold = 0.75 if music_input_analysis.bpm is not None: prompt = prompt + f', bpm : {int(music_input_analysis.bpm)}' music_input = music_input[None] if music_input.dim() == 2 else music_input duration = music_input.shape[-1]/sr wav_sr = model.sample_rate vocal, background = self.separate_vocals(music_input, sr)
audio_write(
5
2023-10-09 09:55:24+00:00
24k
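The remix predictor in the record above derives its default `beat_sync_threshold` from the detected tempo: one beat lasts 60/bpm seconds, so `1.1/(bpm/60)` is 1.1 beat periods, with a fixed 0.75 s fallback when no BPM is detected. A minimal standalone sketch of that fallback rule (the function name and sample values here are illustrative, not taken from the repo):

def default_beat_sync_threshold(bpm, fallback=0.75):
    # 1.1 beat periods in seconds, or a fixed fallback when BPM detection failed
    if bpm is None:
        return fallback
    return 1.1 * (60.0 / int(bpm))

assert abs(default_beat_sync_threshold(120) - 0.55) < 1e-9  # 0.5 s per beat at 120 BPM
assert default_beat_sync_threshold(None) == 0.75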
oracle/guardian-ai
tests/unitary/test_fairness_metrics.py
[ { "identifier": "ConsistencyScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class ConsistencyScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the consistency of a dataset.\n\n Consistency is measured as the number of ratio of instances that have a\n different...
import math import numpy as np import pandas as pd import pytest import sklearn from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import OneHotEncoder from guardian_ai.fairness.metrics.dataset import ( ConsistencyScorer, DatasetStatisticalParityScorer, SmoothedEDFScorer, consistency, dataset_statistical_parity, smoothed_edf, ) from guardian_ai.fairness.metrics.model import ( EqualizedOddsScorer, ErrorRateScorer, FalseDiscoveryRateScorer, FalseNegativeRateScorer, FalseOmissionRateScorer, FalsePositiveRateScorer, ModelStatisticalParityScorer, TheilIndexScorer, TruePositiveRateScorer, equalized_odds, error_rate, false_discovery_rate, false_negative_rate, false_omission_rate, false_positive_rate, model_statistical_parity, theil_index, true_positive_rate, ) from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError from tests.utils import get_dummy_dataset
18,356
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer,
"false_positive_rate_scorer": FalsePositiveRateScorer,
11
2023-10-09 09:48:50+00:00
24k
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(It...
import os import gradio as gr from app_modules.presets import * from app_modules.overwrites import * from app_modules.utils import * from src.item_base import create_items from src.bidder_base import Bidder from src.human_bidder import HumanBidder from src.auctioneer_base import Auctioneer from auction_workflow import run_auction, make_auction_hash from utils import chunks, reset_state_list
15,751
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:] auction_hash = make_auction_hash() items_to_bid = [items[i] for i in items_id] auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct) auctioneer.init_items(items_to_bid) if item_shuffle: auctioneer.shuffle_items() # must correspond to the order in app's parameters input_keys = [ 'chatbot', 'model_name', 'desire', 'plan_strategy', 'budget', 'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent', ] # convert flatten list into a json list input_jsl = [] for i, chunk in enumerate(chunks(args, len(input_keys))): js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash} for k, v in zip(input_keys, chunk): js[k] = v input_jsl.append(js) for js in input_jsl: js.pop('chatbot') if 'human' in js['model_name']: bidder_list.append(HumanBidder.create(**js)) else:
bidder_list.append(Bidder.create(**js))
1
2023-10-08 09:30:57+00:00
24k
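`auction_loop_app` in the record above receives every bidder's widget values as one flat tuple and rebuilds per-bidder dicts by zipping fixed-size chunks against `input_keys`. A self-contained sketch of that pattern; this `chunks` definition is a common recipe and only an assumption about what the imported `utils.chunks` does:

def chunks(seq, n):
    # yield successive n-sized slices of seq
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

input_keys = ['model_name', 'desire', 'budget']
flat_args = ('gpt-4', 'maximize profit', 100, 'claude-2', 'maximize items', 80)
bidders = [dict(zip(input_keys, chunk), name=f'Bidder {i+1}')
           for i, chunk in enumerate(chunks(flat_args, len(input_keys)))]
# -> [{'model_name': 'gpt-4', ..., 'name': 'Bidder 1'}, {..., 'name': 'Bidder 2'}]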
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an ...
import os import random import torchaudio import typing as tp import numpy as np import torch import subprocess from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.solvers.compression import CompressionSolver from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf
20,448
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu" self.mbd = MultiBandDiffusion.get_mbd_musicgen() if str(weights) == "weights": weights = None if weights is not None: print("Fine-tuned model weights loaded!") self.model = load_ckpt(weights, self.device, url=True) def _load_model( self, model_path: str, cls: Optional[any] = None, load_args: Optional[dict] = {}, model_id: Optional[str] = None, device: Optional[str] = None, ) -> MusicGen: if device is None: device = self.device compression_model = load_compression_model( model_id, device=device, cache_dir=model_path ) lm = load_lm_model(model_id, device=device, cache_dir=model_path) return MusicGen(model_id, compression_model, lm) def predict( self, model_version: str = Input( description="Model type. Select `fine-tuned` if you trained the model into your own repository.", default="stereo-chord-large", choices=["chord", "chord-large", "stereo-chord", "stereo-chord-large", "fine-tuned"] ), prompt: str = Input( description="A description of the music you want to generate.", default=None ), text_chords: str = Input( description="A text based chord progression condition. Single uppercase alphabet character(eg. `C`) is considered as a major chord. Chord attributes like(`maj`, `min`, `dim`, `aug`, `min6`, `maj6`, `min7`, `minmaj7`, `maj7`, `7`, `dim7`, `hdim7`, `sus2` and `sus4`) can be added to the root alphabet character after `:`.(eg. `A:min7`) Each chord token splitted by `SPACE` is allocated to a single bar. If more than one chord must be allocated to a single bar, cluster the chords adding with `,` without any `SPACE`.(eg. `C,C:7 G, E:min A:min`) You must choose either only one of `audio_chords` below or `text_chords`.", default=None ), bpm: float = Input( description="BPM condition for the generated output. `text_chords` will be processed based on this value. This will be appended at the end of `prompt`.", default=None ), time_sig: str = Input( description="Time signature value for the generate output. 
`text_chords` will be processed based on this value. This will be appended at the end of `prompt`.", default="4/4" ), audio_chords: Path = Input( description="An audio file that will condition the chord progression. You must choose only one among `audio_chords` or `text_chords` above.", default=None, ), audio_start: int = Input( description="Start time of the audio file to use for chord conditioning.", default=0, ge=0, ), audio_end: int = Input( description="End time of the audio file to use for chord conditioning. If None, will default to the end of the audio clip.", default=None, ge=0, ), duration: int = Input( description="Duration of the generated audio in seconds.", default=8 ), continuation: bool = Input( description="If `True`, generated music will continue from `audio_chords`. If chord conditioning, this is only possible when the chord condition is given with `text_chords`. If `False`, generated music will mimic `audio_chords`'s chord.", default=False, ), # continuation_start: int = Input( # description="Start time of the audio file to use for continuation.", # default=0, # ge=0, # ), # continuation_end: int = Input( # description="End time of the audio file to use for continuation. If -1 or None, will default to the end of the audio clip.", # default=None, # ge=0, # ), multi_band_diffusion: bool = Input( description="If `True`, the EnCodec tokens will be decoded with MultiBand Diffusion. Not compatible with stereo models.", default=False, ), normalization_strategy: str = Input( description="Strategy for normalizing audio.", default="loudness", choices=["loudness", "clip", "peak", "rms"], ), chroma_coefficient: float = Input( description="Coefficient value multiplied to multi-hot chord chroma.", default=1.0, ge=0.5, le=2.5 ), top_k: int = Input( description="Reduces sampling to the k most likely tokens.", default=250 ), top_p: float = Input( description="Reduces sampling to tokens with cumulative probability of p. When set to `0` (default), top_k sampling is used.", default=0.0, ), temperature: float = Input( description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity.", default=1.0, ), classifier_free_guidance: int = Input( description="Increases the influence of inputs on the output. Higher values produce lower-varience outputs that adhere more closely to inputs.", default=3, ), output_format: str = Input( description="Output format for generated audio.", default="wav", choices=["wav", "mp3"], ), seed: int = Input( description="Seed for random number generator. 
If `None` or `-1`, a random seed will be used.", default=None, ), ) -> Path: if text_chords == '': text_chords = None if text_chords and audio_chords and not continuation: raise ValueError("Must provide either only one of `audio_chords` or `text_chords`.") if text_chords and not bpm: raise ValueError("There must be `bpm` value set when text based chord conditioning.") if text_chords and (not time_sig or time_sig==""): raise ValueError("There must be `time_sig` value set when text based chord conditioning.") if continuation and not audio_chords: raise ValueError("Must provide an audio input file via `audio_chords` if continuation is `True`.") if multi_band_diffusion and int(self.model.lm.cfg.transformer_lm.n_q) == 8: raise ValueError("Multi-band Diffusion only works with non-stereo models.") if prompt is None: prompt = '' if time_sig is not None and not time_sig == '': if prompt == '': prompt = time_sig else: prompt = prompt + ', ' + time_sig if bpm is not None: if prompt == '': prompt = str(bpm) else: prompt = prompt + f', bpm : {bpm}' if model_version == "fine-tuned": try: self.model except AttributeError: raise Exception("ERROR: Fine-tuned weights don't exist! Is the model trained from `sakemin/musicgen-chord`? If not, set `model_version` from `chord`, `chord-large`, `stereo-chord` and `stereo-chord-large`.") else: if os.path.isfile(f'musicgen-{model_version}.th'): pass else: url = f"https://weights.replicate.delivery/default/musicgen-chord/musicgen-{model_version}.th" dest = f"musicgen-{model_version}.th" subprocess.check_call(["pget", url, dest], close_fds=False) self.model = load_ckpt(f'musicgen-{model_version}.th', self.device) self.model.lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True model = self.model set_generation_params = lambda duration: model.set_generation_params( duration=duration, top_k=top_k, top_p=top_p, temperature=temperature, cfg_coef=classifier_free_guidance, ) model.lm.condition_provider.conditioners['self_wav'].chroma_coefficient = chroma_coefficient if not seed or seed == -1: seed = torch.seed() % 2 ** 32 - 1 set_all_seeds(seed) set_all_seeds(seed) print(f"Using seed {seed}") ''' if duration > 30: encodec_rate = 50 sub_duration=25 overlap = 30 - sub_duration wavs = [] wav_sr = model.sample_rate set_generation_params(30) if (text_chords is None) and audio_chords is None: # Case 1 wav, tokens = model.generate([prompt], progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range((duration - overlap) // sub_duration - 1): wav, tokens= model.generate_continuation_with_audio_token( prompt=tokens[...,sub_duration*encodec_rate:], descriptions=[prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if (duration - overlap) % sub_duration != 0: set_generation_params(overlap + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_token( prompt=tokens[...,sub_duration*encodec_rate:], descriptions=[prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) elif (text_chords is None or text_chords == '') and audio_chords is not None: # Case 2 audio_chords, sr = torchaudio.load(audio_chords) audio_chords = audio_chords[None] if audio_chords.dim() == 2 else audio_chords audio_start = 0 if not audio_start else audio_start if audio_end is None or audio_end == -1: audio_end = 
audio_chords.shape[-1] / sr if audio_start > audio_end: raise ValueError( "`audio_start` must be less than or equal to `audio_end`" ) audio_chords = audio_chords[ ..., int(sr * audio_start) : int(sr * audio_end) ] wav, tokens = model.generate_with_chroma(['the intro of ' + prompt], audio_chords[...,:30*sr], sr, progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range(int((duration - overlap) // sub_duration) - 1): wav, tokens = model.generate_continuation_with_audio_tokens_and_audio_chroma( prompt=tokens[...,sub_duration*encodec_rate:], melody_wavs = audio_chords[...,sub_duration*(i+1)*sr:(sub_duration*(i+1)+30)*sr], melody_sample_rate=sr, descriptions=['chorus of ' + prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if int(duration - overlap) % sub_duration != 0: set_generation_params(overlap + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_tokens_and_audio_chroma( prompt=tokens[...,sub_duration*encodec_rate:], melody_wavs = audio_chords[...,sub_duration*(len(wavs))*sr:], melody_sample_rate=sr, descriptions=['the outro of ' + prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) else: # Case 3 wav, tokens = model.generate_with_text_chroma(descriptions = [prompt], chord_texts = [text_chords], bpm = [bpm], meter = [int(time_sig.split('/')[0])], progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range((duration - 10) // sub_duration - 1): model.lm.condition_provider.conditioners['self_wav'].set_continuation_count(sub_duration/30, i) wav, tokens = model.generate_continuation_with_audio_tokens_and_text_chroma( tokens[...,sub_duration*encodec_rate:], [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if (duration - overlap) % sub_duration != 0: model.lm.condition_provider.conditioners['self_wav'].set_continuation_count(sub_duration/30, i+1) set_generation_params(sub_duration + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_tokens_and_text_chroma( tokens[...,sub_duration*encodec_rate:], [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) wav = wavs[0][...,:sub_duration*wav_sr] for i in range(len(wavs)-1): if i == len(wavs)-2: wav = torch.concat([wav,wavs[i+1]],dim=-1) else: wav = torch.concat([wav,wavs[i+1][...,:sub_duration*wav_sr]],dim=-1) wav = wav.cpu() else: ''' if not audio_chords: set_generation_params(duration) if text_chords is None or text_chords == '': # Case 4 wav, tokens = model.generate([prompt], progress=True, return_tokens=True) else: # Case 5 wav, tokens = model.generate_with_text_chroma(descriptions = [prompt], chord_texts = [text_chords], bpm = [bpm], meter = [int(time_sig.split('/')[0])], progress=True, return_tokens=True) else: audio_chords, sr = torchaudio.load(audio_chords) audio_chords = audio_chords[None] if audio_chords.dim() == 2 else audio_chords audio_start = 0 if not audio_start else audio_start if audio_end is None or audio_end == -1: 
audio_end = audio_chords.shape[2] / sr if audio_start > audio_end: raise ValueError( "`audio_start` must be less than or equal to `audio_end`" ) audio_chords_wavform = audio_chords[ ..., int(sr * audio_start) : int(sr * audio_end) ] audio_chords_duration = audio_chords_wavform.shape[-1] / sr if continuation: set_generation_params(duration) if text_chords is None or text_chords == '': # Case 6 wav, tokens = model.generate_continuation( prompt=audio_chords_wavform, prompt_sample_rate=sr, descriptions=[prompt], progress=True, return_tokens=True ) else: # Case 7 wav, tokens = model.generate_continuation_with_text_chroma( audio_chords_wavform, sr, [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) else: # Case 8 set_generation_params(duration) wav, tokens = model.generate_with_chroma( [prompt], audio_chords_wavform, sr, progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens)
audio_write(
6
2023-10-09 09:52:24+00:00
24k
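The `text_chords` description in the record above defines a small textual convention: an uppercase root alone is a major chord, `:` attaches a quality (`A:min7`), each space-separated token fills one bar, and `,` packs several chords into one bar. A rough parser written from that field description alone (an assumption about the format, not the repo's actual parsing code):

def parse_text_chords(text):
    # one list of chord symbols per bar; empty fragments from stray commas are dropped
    return [[c for c in bar.split(',') if c] for bar in text.split()]

print(parse_text_chords('C,C:7 G E:min A:min'))
# -> [['C', 'C:7'], ['G'], ['E:min'], ['A:min']]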
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import os import copy import click import re import numpy as np import torch import torch.utils.data import torch.utils.checkpoint import decord import shutil from glob import glob from typing import Optional,Dict from tqdm.auto import tqdm from omegaconf import OmegaConf from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, ) from diffusers.utils.import_utils import is_xformers_available from transformers import AutoTokenizer, CLIPTextModel from einops import rearrange from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.data.dataset import ImageSequenceDataset from video_diffusion.common.util import get_time_string, get_function_args from video_diffusion.common.logger import get_logger_config_path from video_diffusion.common.image_util import log_train_samples from video_diffusion.common.instantiate_from_config import instantiate_from_config from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger from annotator.util import get_control from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler from RIFEModel.RIFE_HDv3 import Model
20,140
print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame)) if dataset_config.n_sample_frame <= 50: duration = 100 fps = 10 sample_index = list(range(0,len(vr), 1))[:dataset_config.n_sample_frame] video = vr.get_batch(sample_index) video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path) video_name = video_name_match.group(2) video_frame_folder = os.path.join('data',video_name) if os.path.exists(video_frame_folder): shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) #加载unet报错 unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids
decord.bridge.set_bridge('torch') # from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger # logger = get_logger(__name__) def collate_fn(examples): """Concat a batch of sampled image in dataloader """ batch = { "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0), "images": torch.stack([example["images"] for example in examples]), } return batch def test( config: str, pretrained_model_path: str, control_type:str, pretrained_controlnet_model_path :str, dataset_config: Dict, logdir: str = None, editing_config: Optional[Dict] = None, test_pipeline_config: Optional[Dict] = None, gradient_accumulation_steps: int = 1, seed: Optional[int] = None, mixed_precision: Optional[str] = "fp16", batch_size: int = 1, model_config: dict={}, verbose: bool=True, **kwargs ): args = get_function_args() vr = decord.VideoReader(dataset_config.video_path) fps = vr.get_avg_fps() duration = len(vr) / fps print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame)) if dataset_config.n_sample_frame <= 50: duration = 100 fps = 10 sample_index = list(range(0,len(vr), 1))[:dataset_config.n_sample_frame] video = vr.get_batch(sample_index) video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path) video_name = video_name_match.group(2) video_frame_folder = os.path.join('data',video_name) if os.path.exists(video_frame_folder): shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) # loading unet raises an error unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if
editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids
video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids)
2
2023-10-09 14:38:28+00:00
24k
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n ...
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.optimizer.anan import Adan from lib.train.optimizer.lion import Lion from lib.utils.misc import is_main_process
21,027
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb")
datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader,
11
2023-10-08 11:44:32+00:00
24k
LiyaoTang/ERDA
main.py
[ { "identifier": "load_config", "path": "config/utils.py", "snippet": "def load_config(cfg_path=None, dataset_name=None, cfg_name=None, cfg_group=None, reload=True):\n # cfg from path\n if cfg_path is not None:\n update = None\n if os.path.isfile(cfg_path):\n # update on th...
import numpy as np import multiprocessing as mp import os, sys, time, glob, pickle, psutil, argparse, importlib import tensorflow as tf import models, datasets import utils.memory_saving_gradients from config import load_config, log_config from utils.logger import print_mem, redirect_io from config.utils import get_snap from utils.tester import ModelTester from utils.trainer import ModelTrainer from utils.tf_graph_builder import GraphBuilder
16,504
# Common libs sys.path.insert(0, f'{os.getcwd()}') # Custom libs def get_last_train(cfg): saving_path = sorted(glob.glob(f'results/{cfg.dataset.lower()}/{cfg.name}/*')) return saving_path[-1] if saving_path else None parser = argparse.ArgumentParser() parser.add_argument('-c', '--cfg_path', type=str, help='config path') parser.add_argument('--gpus', type=str, default=None, help='the number/ID of GPU(s) to use [default: 1], 0 to use cpu only') parser.add_argument('--mode', type=str, default=None, help='options: train, val, test') parser.add_argument('--seed', type=int, default=None, dest='rand_seed', help='random seed for use') parser.add_argument('--data_path', type=str, default=None, help='path to dataset dir = data_path/dataset_name') parser.add_argument('--model_path', type=str, default=None, help='pretrained model path') parser.add_argument('--saving_path', type=str, default=None, help='specified saving path') parser.add_argument('--num_votes', type=float, default=None, help='least num of votes of each point (default to 30)') parser.add_argument('--num_threads', type=lambda n: mp.cpu_count() if n == 'a' else int(n) if n else None, default=None, help='the number of cpu to use for data loading') parser.add_argument('--set', type=str, help='external source to set the config - str of dict / yaml file') parser.add_argument('--debug', action='store_true', help='debug mode') FLAGS = parser.parse_args() # sys.argv = sys.argv[:1] # clean extra argv # ---------------------------------------------------------------------------- # # solve env & cfg # ---------------------------------------------------------------------------- # assert FLAGS.cfg_path is not None # load config - config path: config(dir).dataset_name(py).config_name(py_class)
cfg = load_config(cfg_path=FLAGS.cfg_path)
0
2023-10-13 08:03:07+00:00
24k
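The `--num_threads` option in the record above packs three behaviors into one chained conditional passed as argparse's `type=`: the literal 'a' expands to every CPU, a digit string is parsed, and an empty string falls through to None. The same converter written out in isolation (a standalone restatement for readability, not the project's code):

import multiprocessing as mp

def num_threads(n):
    # 'a' -> all CPUs, '8' -> 8, '' -> unset
    if n == 'a':
        return mp.cpu_count()
    return int(n) if n else None

assert num_threads('a') == mp.cpu_count()
assert num_threads('8') == 8
assert num_threads('') is None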
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "dist/py/Python38/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared ...
import functools import logging import typing import warnings import ssl from types import TracebackType from urllib.parse import urljoin from ._collections import HTTPHeaderDict, RecentlyUsedContainer from ._request_methods import RequestMethods from .connection import ProxyConfig from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, URLSchemeUnknown, ) from .response import BaseHTTPResponse from .util.connection import _TYPE_SOCKET_OPTIONS from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.timeout import Timeout from .util.url import Url, parse_url from typing_extensions import Literal
20,132
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None
key__proxy: Url | None
14
2023-10-11 09:08:57+00:00
24k
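`PoolKey` in the record above exists so that connections to the same normalized (scheme, host, port, TLS options, ...) target share one connection pool, with the NamedTuple serving as the dictionary key. A toy illustration of that keying scheme under assumed names (`SketchPoolKey` and `pool_for` are not urllib3 APIs, and urllib3's real key carries many more fields):

from typing import NamedTuple, Optional

class SketchPoolKey(NamedTuple):
    scheme: str
    host: str
    port: Optional[int]

_pools = {}

def pool_for(scheme, host, port):
    # case-insensitive scheme/host so equivalent targets hit the same pool
    key = SketchPoolKey(scheme.lower(), host.lower(), port)
    return _pools.setdefault(key, [])

assert pool_for('HTTPS', 'Example.com', 443) is pool_for('https', 'example.com', 443)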
MTgeophysics/mtpy-v2
mtpy/modeling/modem/convariance.py
[ { "identifier": "CovarianceError", "path": "mtpy/modeling/modem/exception.py", "snippet": "class CovarianceError(ModEMError):\n \"\"\" Raise for Covariance class specific exceptions\"\"\"\n\n pass" }, { "identifier": "Model", "path": "mtpy/modeling/modem/model.py", "snippet": "clas...
from pathlib import Path from loguru import logger from .exception import CovarianceError from .model import Model from pyevtk.hl import gridToVTK import numpy as np
19,948
""" ================== ModEM ================== # Generate files for ModEM # revised by JP 2017 # revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Covariance(object): """ read and write covariance files """ def __init__(self, grid_dimensions=None, **kwargs): self._logger = logger self.grid_dimensions = grid_dimensions self.smoothing_east = 0.3 self.smoothing_north = 0.3 self.smoothing_z = 0.3 self.smoothing_num = 1 self.exception_list = [] self.mask_arr = None self.save_path = Path().cwd() self.fn_basename = "covariance.cov" self._header_str = "\n".join( [ "+{0}+".format("-" * 77), "| This file defines model covariance for a recursive autoregression scheme. |", "| The model space may be divided into distinct areas using integer masks. |", "| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |", "| air, ocean and the rest of the model is turned off automatically. You can |", "| also define exceptions to override smoothing between any two model areas. |", "| To turn off smoothing set it to zero. This header is 16 lines long. |", "| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |", "| 2. Smoothing in the X direction (NzEarth real values) |", "| 3. Smoothing in the Y direction (NzEarth real values) |", "| 4. Vertical smoothing (1 real value) |", "| 5. Number of times the smoothing should be applied (1 integer >= 0) |", "| 6. Number of exceptions (1 integer >= 0) |", "| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4) |", "| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|", "+{0}+".format("-" * 77), ] ) for key in list(kwargs.keys()): if hasattr(self, key): setattr(self, key, kwargs[key]) else: self._logger.warn( "Argument {}={} is not supportted thus not been set.".format( key, kwargs[key] ) ) @property def cov_fn(self): return self.save_path.joinpath(self.fn_basename) @cov_fn.setter def cov_fn(self, value): if value is not None: value = Path(value) self.save_path = value.parent self.fn_basename = value.name def write_covariance_file( self, cov_fn=None, save_path=None, fn_basename=None, model_fn=None, sea_water=0.3, air=1e12, ): # """ write a covariance file """ if model_fn is not None:
""" ================== ModEM ================== # Generate files for ModEM # revised by JP 2017 # revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Covariance(object): """ read and write covariance files """ def __init__(self, grid_dimensions=None, **kwargs): self._logger = logger self.grid_dimensions = grid_dimensions self.smoothing_east = 0.3 self.smoothing_north = 0.3 self.smoothing_z = 0.3 self.smoothing_num = 1 self.exception_list = [] self.mask_arr = None self.save_path = Path().cwd() self.fn_basename = "covariance.cov" self._header_str = "\n".join( [ "+{0}+".format("-" * 77), "| This file defines model covariance for a recursive autoregression scheme. |", "| The model space may be divided into distinct areas using integer masks. |", "| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |", "| air, ocean and the rest of the model is turned off automatically. You can |", "| also define exceptions to override smoothing between any two model areas. |", "| To turn off smoothing set it to zero. This header is 16 lines long. |", "| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |", "| 2. Smoothing in the X direction (NzEarth real values) |", "| 3. Smoothing in the Y direction (NzEarth real values) |", "| 4. Vertical smoothing (1 real value) |", "| 5. Number of times the smoothing should be applied (1 integer >= 0) |", "| 6. Number of exceptions (1 integer >= 0) |", "| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4) |", "| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|", "+{0}+".format("-" * 77), ] ) for key in list(kwargs.keys()): if hasattr(self, key): setattr(self, key, kwargs[key]) else: self._logger.warn( "Argument {}={} is not supportted thus not been set.".format( key, kwargs[key] ) ) @property def cov_fn(self): return self.save_path.joinpath(self.fn_basename) @cov_fn.setter def cov_fn(self, value): if value is not None: value = Path(value) self.save_path = value.parent self.fn_basename = value.name def write_covariance_file( self, cov_fn=None, save_path=None, fn_basename=None, model_fn=None, sea_water=0.3, air=1e12, ): # """ write a covariance file """ if model_fn is not None:
mod_obj = Model()
1
2023-10-11 22:24:50+00:00
24k
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResu...
from typing import (
    Any,
    AsyncGenerator,
    Callable,
    Dict,
    Generator,
    List,
    Optional,
    Tuple,
    Union,
)
from uuid import UUID, uuid4
from threading import Thread

from rich import print
from litellm.utils import ModelResponse, get_max_tokens

from promptmodel.llms.llm import LLM
from promptmodel.database.models import (
    DeployedPrompt,
    DeployedFunctionModel,
    DeployedFunctionModelVersion,
)
from promptmodel.database.crud import (
    get_deployed_prompts,
)
from promptmodel.promptmodel_init import CacheManager
from promptmodel.utils.config_utils import read_config, upsert_config
from promptmodel.utils.random_utils import select_version_by_ratio
from promptmodel.utils import logger
from promptmodel.utils.async_utils import run_async_in_sync
from promptmodel.utils.token_counting import (
    num_tokens_for_messages_for_each,
    num_tokens_from_functions_input,
)
from promptmodel.utils.output_utils import update_dict
from promptmodel.apis.base import AsyncAPIClient
from promptmodel.types.response import (
    LLMResponse,
    LLMStreamResponse,
    FunctionModelConfig,
    ChatModelConfig,
    UnitConfig,
    PMDetail,
)
from promptmodel.types.request import ChatLogRequest
19,740
def _prepare_call_args( self, prompts: List[Dict[str, str]], version_detail: Dict[str, Any], inputs: Dict[str, Any], kwargs, ): stringified_inputs = {key: str(value) for key, value in inputs.items()} messages = [ { "content": prompt["content"].format(**stringified_inputs), "role": prompt["role"], } for prompt in prompts ] call_args = { "messages": messages, "model": version_detail["model"] if version_detail else None, "parsing_type": version_detail["parsing_type"] if version_detail else None, "output_keys": version_detail["output_keys"] if version_detail else None, } if call_args["parsing_type"] is None: del call_args["parsing_type"] del call_args["output_keys"] if "functions" in kwargs: call_args["functions"] = kwargs["functions"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] return call_args def _prepare_call_args_for_chat( self, messages: List[Dict[str, Any]], version_detail: Dict[str, Any], kwargs, ): call_args = {} token_per_tools = 0 if "functions" in kwargs: call_args["functions"] = kwargs["functions"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["functions"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) if "tools" in kwargs: call_args["tools"] = kwargs["tools"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["tools"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) # truncate messages to make length <= model's max length model_max_tokens = get_max_tokens( model=version_detail["model"] if version_detail else "gpt-3.5-turbo" ) token_per_messages = num_tokens_for_messages_for_each( messages, version_detail["model"] ) token_limit_exceeded = ( sum(token_per_messages) + token_per_tools ) - model_max_tokens if token_limit_exceeded > 0: while token_limit_exceeded > 0: # erase the second oldest message (first one is system prompt, so it should not be erased) if len(messages) == 1: # if there is only one message, Error cannot be solved. Just call LLM and get error response break token_limit_exceeded -= token_per_messages[1] del messages[1] del token_per_messages[1] call_args["messages"] = messages call_args["model"] = version_detail["model"] if version_detail else None if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] return call_args async def _async_log_to_cloud( self, version_uuid: str, log_uuid: str, inputs: Optional[Dict] = None, api_response: Optional[ModelResponse] = None, parsed_outputs: Optional[Dict] = None, metadata: Optional[Dict] = None, ): config = read_config() if ( "project" in config and "mask_inputs" in config["project"] and config["project"]["mask_inputs"] == True ): inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()} # Perform the logging asynchronously if api_response: api_response_dict = api_response.model_dump() api_response_dict["response_ms"] = api_response._response_ms api_response_dict["_response_ms"] = api_response._response_ms else: api_response_dict = None run_log_request_body = { "uuid": log_uuid, "api_response": api_response_dict, "inputs": inputs, "parsed_outputs": parsed_outputs, "metadata": metadata, }
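The truncation loop in _prepare_call_args_for_chat above drops the second-oldest message (index 0 is the system prompt and is preserved) until the conversation plus tool schemas fit the model's context window. A standalone sketch of the same policy; the function name and the per-message token list are illustrative, not part of the library:

from typing import Dict, List

def truncate_chat_history(
    messages: List[Dict[str, str]],
    tokens_per_message: List[int],
    token_budget: int,
) -> List[Dict[str, str]]:
    """Drop the oldest non-system messages until the history fits the budget."""
    messages = list(messages)
    tokens_per_message = list(tokens_per_message)
    overflow = sum(tokens_per_message) - token_budget
    while overflow > 0 and len(messages) > 1:
        # Index 0 is the system prompt, so evict index 1 (the oldest turn).
        overflow -= tokens_per_message.pop(1)
        messages.pop(1)
    return messages

# If only the system prompt remains and the budget is still exceeded, the
# history is returned as-is and the model call is left to surface the error,
# matching the behaviour of the loop above.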
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) ) return wrapper def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call async_gen with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) log_uuid = str(uuid4()) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None api_response: Optional[ModelResponse] = None async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item # # add string_cache in model_response # if api_response: # if "message" not in api_response.choices[0]: # api_response.choices[0].message = {} # if "content" not in api_response.choices[0].message: # api_response.choices[0].message["content"] = string_cache # api_response.choices[0].message["role"] = "assistant" metadata = { "error": error_occurs, "error_log": error_log, } await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], 
inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) # raise Exception("error_log") return wrapper def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) ) else: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) # messages, model, uuid = self._fetch_prompts() call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) else: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = 
llm_response.api_response log_uuid = str(uuid4()) run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( message=llm_response.api_response.choices[ 0 ].message.model_dump(), uuid=log_uuid, metadata=metadata, api_response=api_response, ) ], ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = llm_response.api_response log_uuid = str(uuid4()) await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=llm_response.api_response.choices[ 0 ].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) ) return wrapper def 
_wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) return wrapper def _prepare_call_args( self, prompts: List[Dict[str, str]], version_detail: Dict[str, Any], inputs: Dict[str, Any], kwargs, ): stringified_inputs = {key: str(value) for key, value in inputs.items()} messages = [ { "content": prompt["content"].format(**stringified_inputs), "role": prompt["role"], } for prompt in prompts ] call_args = { "messages": messages, "model": version_detail["model"] if version_detail else None, "parsing_type": version_detail["parsing_type"] if version_detail else None, "output_keys": version_detail["output_keys"] if version_detail else None, } if call_args["parsing_type"] is None: del call_args["parsing_type"] del call_args["output_keys"] if "functions" in kwargs: call_args["functions"] = kwargs["functions"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] return call_args def _prepare_call_args_for_chat( self, messages: List[Dict[str, Any]], version_detail: Dict[str, Any], kwargs, ): call_args = {} token_per_tools = 0 if "functions" in kwargs: call_args["functions"] = kwargs["functions"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["functions"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) if "tools" in kwargs: call_args["tools"] = kwargs["tools"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["tools"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) # truncate messages to make length <= model's max length model_max_tokens = get_max_tokens( model=version_detail["model"] if version_detail else "gpt-3.5-turbo" ) token_per_messages = num_tokens_for_messages_for_each( messages, version_detail["model"] ) token_limit_exceeded = ( sum(token_per_messages) + token_per_tools ) - model_max_tokens if token_limit_exceeded > 0: while token_limit_exceeded > 0: # erase the second oldest message (first one is system prompt, so it should not be erased) if len(messages) == 1: # if there is only one message, Error cannot be solved. 
Just call LLM and get error response break token_limit_exceeded -= token_per_messages[1] del messages[1] del token_per_messages[1] call_args["messages"] = messages call_args["model"] = version_detail["model"] if version_detail else None if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] return call_args async def _async_log_to_cloud( self, version_uuid: str, log_uuid: str, inputs: Optional[Dict] = None, api_response: Optional[ModelResponse] = None, parsed_outputs: Optional[Dict] = None, metadata: Optional[Dict] = None, ): config = read_config() if ( "project" in config and "mask_inputs" in config["project"] and config["project"]["mask_inputs"] == True ): inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()} # Perform the logging asynchronously if api_response: api_response_dict = api_response.model_dump() api_response_dict["response_ms"] = api_response._response_ms api_response_dict["_response_ms"] = api_response._response_ms else: api_response_dict = None run_log_request_body = { "uuid": log_uuid, "api_response": api_response_dict, "inputs": inputs, "parsed_outputs": parsed_outputs, "metadata": metadata, }
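_async_log_to_cloud redacts inputs when the project config sets mask_inputs. A minimal standalone version of that guard, assuming the config shape returned by read_config() above; unlike the inline code, this sketch also tolerates inputs=None, which the wrapped methods can pass:

from typing import Any, Dict, Optional

def mask_private_inputs(
    inputs: Optional[Dict[str, Any]], config: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
    """Replace input values with a placeholder when project-level masking is on."""
    if inputs is None:
        return None
    if config.get("project", {}).get("mask_inputs") is True:
        return {key: "PRIVATE LOGGING" for key in inputs}
    return inputs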
res = await AsyncAPIClient.execute(
14
2023-10-09 03:35:44+00:00
24k
cambridgeltl/ClaPS
algs/greedy.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: L...
import random
from typing import Any, Optional

import numpy as np

from rewards.text_classification_reward import PromptedClassificationReward
from utils.fsc_datasets import PromptedClassificationDataset
from .base_trainer import BaseTrainer
18,703
class GreedyTrainer(BaseTrainer):
    def __init__(
        self,
obj_func: PromptedClassificationReward,
4
2023-10-08 12:39:44+00:00
24k
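The record above only shows the head of GreedyTrainer. For context, a generic sketch of the greedy prompt-search loop such a trainer typically implements; this illustrates the technique and is not the ClaPS implementation -- the score function and token vocabulary are assumptions:

from typing import Callable, List

def greedy_prompt_search(
    candidate_tokens: List[str],
    prompt_length: int,
    score_fn: Callable[[List[str]], float],
) -> List[str]:
    """Grow a prompt one position at a time, keeping the best-scoring token."""
    prompt: List[str] = []
    for _ in range(prompt_length):
        best_token, best_score = None, float("-inf")
        for token in candidate_tokens:
            # score_fn would typically be a reward such as validation accuracy.
            score = score_fn(prompt + [token])
            if score > best_score:
                best_token, best_score = token, score
        prompt.append(best_token)
    return prompt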
clessig/atmorep
atmorep/core/trainer.py
[ { "identifier": "AtmoRep", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRep( torch.nn.Module) :\n\n def __init__(self, cf) :\n '''Constructor'''\n \n super( AtmoRep, self).__init__()\n\n self.cf = cf\n\n ###################################################\n def create( self...
import torch import torchinfo import numpy as np import code import os import datetime import functools import pandas as pd import wandb import torch.distributed as dist import torch.utils.data.distributed import atmorep.config.config as config import atmorep.utils.token_infos_transformations as token_infos_transformations import atmorep.utils.utils as utils from pathlib import Path from typing import TypeVar from torch.distributed.optim import ZeroRedundancyOptimizer from atmorep.core.atmorep_model import AtmoRep from atmorep.core.atmorep_model import AtmoRepData from atmorep.training.bert import prepare_batch_BERT_multifield from atmorep.transformer.transformer_base import positional_encoding_harmonic from atmorep.utils.utils import shape_to_str from atmorep.utils.utils import relMSELoss from atmorep.utils.utils import Gaussian from atmorep.utils.utils import CRPS from atmorep.utils.utils import NetMode from atmorep.utils.utils import sgn_exp from atmorep.datasets.data_writer import write_forecast, write_BERT, write_attention
15,091
# target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields] write_BERT( cf.wandb_id, epoch, batch_idx, levels, sources_out, [sources_dates_out, sources_lats_out, sources_lons_out], targets_out, [targets_dates_out, targets_lats_out, targets_lons_out], preds_out, ensembles_out ) def log_attention( self, epoch, bidx, log) : '''Hook for logging: output attention maps.''' cf = self.cf attention, token_infos = log attn_dates_out, attn_lats_out, attn_lons_out = [ ], [ ], [ ] attn_out = [] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = 
int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) coords_b = [] for tinfo in tinfos : # use first vertical levels since a column is considered res = tinfo[0,0,0,0,-1] lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res*token_size[1]) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res*token_size[2]), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res*token_size[2]) lons = np.remainder( lons, 360.) dates = np.array([(utils.token_info_to_time(tinfo[0,t,0,0,:3])) for t in range(tinfo.shape[1])], dtype='datetime64[s]') coords_b += [ [dates, lats, lons] ] if is_predicted: attn_out.append([field_info[0], attention[fidx]]) attn_dates_out.append([c[0] for c in coords_b]) attn_lats_out.append( [c[1] for c in coords_b]) attn_lons_out.append( [c[2] for c in coords_b]) else: attn_dates_out.append( [] ) attn_lats_out.append( [] ) attn_lons_out.append( [] ) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
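The full trainer source, reproduced below as this record's all_code field, builds its learning-rate schedule in get_learn_rates: a linear warm-up over lr_start_epochs followed by exponential decay floored at lr_min. A standalone sketch of that schedule; the parameter names follow the config fields used there, and the default values are illustrative:

import numpy as np

def warmup_decay_schedule(
    num_epochs: int,
    lr_start: float = 1e-6,
    lr_max: float = 1e-4,
    lr_min: float = 1e-6,
    lr_decay_rate: float = 1.1,
    lr_start_epochs: int = 3,
) -> np.ndarray:
    """Linear warm-up to lr_max, then divide by lr_decay_rate each epoch."""
    lrs = np.zeros(num_epochs)
    warmup = min(lr_start_epochs, num_epochs)
    lrs[:warmup] = np.linspace(lr_start, lr_max, num=warmup)
    lr = lrs[warmup - 1]
    for epoch in range(warmup, num_epochs):
        lr = max(lr / lr_decay_rate, lr_min)
        lrs[epoch] = lr
    return lrs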
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class Trainer_Base() : def __init__( self, cf, devices ) : self.cf = cf self.devices = devices self.device_in = devices[0] self.device_out = devices[-1] self.fields_prediction_idx = [] self.loss_weights = torch.zeros( len(cf.fields_prediction) ) for ifield, field in enumerate(cf.fields_prediction) : self.loss_weights[ifield] = self.cf.fields_prediction[ifield][1] for idx, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.fields_prediction_idx.append( idx) break self.loss_weights = self.loss_weights.to( self.device_out) self.MSELoss = torch.nn.MSELoss() # transformation for token infos if hasattr( cf, 'token_infos_transformation') : self.tok_infos_trans = getattr( token_infos_transformations, cf.token_infos_transformation) else : self.tok_infos_trans = getattr( token_infos_transformations, 'identity') if 0 == cf.par_rank : directory = Path( config.path_results, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) directory = Path( config.path_models, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) ################################################### def create( self, load_embeds=True) : net = AtmoRep( self.cf) self.model = AtmoRepData( net) self.model.create( self.pre_batch, self.devices, load_embeds) # TODO: pass the properly to model / net self.model.net.encoder_to_decoder = self.encoder_to_decoder self.model.net.decoder_to_tail = self.decoder_to_tail return self ################################################### @classmethod def load( Typename, cf, model_id, epoch, devices) : trainer = Typename( cf, devices).create( load_embeds=False) trainer.model.net = trainer.model.net.load( model_id, devices, cf, epoch) # TODO: pass the properly to model / net trainer.model.net.encoder_to_decoder = trainer.encoder_to_decoder trainer.model.net.decoder_to_tail = trainer.decoder_to_tail str = 'Loaded model id = {}{}.'.format( model_id, f' at epoch = {epoch}' if epoch> -2 else '') print( str) return trainer ################################################### def save( self, epoch) : self.model.net.save( epoch) ################################################### def get_learn_rates( self) : cf = self.cf size_padding = 5 learn_rates = np.zeros( cf.num_epochs + size_padding) learn_rates[:cf.lr_start_epochs] = np.linspace( cf.lr_start, cf.lr_max, num = cf.lr_start_epochs) lr = learn_rates[cf.lr_start_epochs-1] ic = 0 for epoch in range( cf.lr_start_epochs, cf.num_epochs + size_padding) : lr = max( lr / cf.lr_decay_rate, cf.lr_min) learn_rates[epoch] = lr if ic > 9999 : # sanity check assert "Maximum number of epochs exceeded." 
return learn_rates ################################################### def run( self, epoch = -1) : cf = self.cf model = self.model learn_rates = self.get_learn_rates() if cf.with_ddp : self.model_ddp = torch.nn.parallel.DistributedDataParallel( model, static_graph=True) if not cf.optimizer_zero : self.optimizer = torch.optim.AdamW( self.model_ddp.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) else : self.optimizer = ZeroRedundancyOptimizer(self.model_ddp.parameters(), optimizer_class=torch.optim.AdamW, lr=cf.lr_start ) else : self.optimizer = torch.optim.AdamW( self.model.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) if 0 == cf.par_rank : # print( self.model.net) model_parameters = filter(lambda p: p.requires_grad, self.model_ddp.parameters()) num_params = sum([np.prod(p.size()) for p in model_parameters]) print( f'Number of trainable parameters: {num_params:,}') # test at the beginning as reference self.model.load_data( NetMode.test, batch_size=cf.batch_size_test) if cf.test_initial : cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() test_loss = np.array( [cur_test_loss]) else : # generic value based on data normalization test_loss = np.array( [1.0]) epoch += 1 batch_size = cf.batch_size_start - cf.batch_size_delta if cf.profile : lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr self.model.load_data( NetMode.train, batch_size = cf.batch_size_max) self.profile() # training loop while True : if epoch >= cf.num_epochs : break lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr batch_size = min( cf.batch_size_max, batch_size + cf.batch_size_delta) tstr = datetime.datetime.now().strftime("%H:%M:%S") print( '{} : {} :: batch_size = {}, lr = {}'.format( epoch, tstr, batch_size, lr) ) self.model.load_data( NetMode.train, batch_size = batch_size) self.train( epoch) if cf.with_wandb and 0 == cf.par_rank : self.save( epoch) cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() # self.validate( epoch, 'forecast') # save model if cur_test_loss < test_loss.min() : self.save( -2) test_loss = np.append( test_loss, [cur_test_loss]) epoch += 1 tstr = datetime.datetime.now().strftime("%H:%M:%S") print( 'Finished training at {} with test loss = {}.'.format( tstr, test_loss[-1]) ) # save final network if cf.with_wandb and 0 == cf.par_rank : self.save( -2) ################################################### def train( self, epoch): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() loss_total = [[] for i in range(len(cf.losses)) ] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] mse_loss_total = [] grad_loss_total = [] ctr = 0 for batch_idx in range( model.len( NetMode.train)) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() loss.backward() self.optimizer.step() [loss_total[idx].append( losses[key]) for idx, key in enumerate(losses)] mse_loss_total.append( mse_loss.detach().cpu() ) grad_loss_total.append( loss.detach().cpu() ) [std_dev_total[idx].append( pred[1].detach().cpu()) for idx, pred in enumerate(preds)] # logging if int((batch_idx * cf.batch_size_max) / 4) > ctr : # wandb logging if cf.with_wandb and (0 == cf.par_rank) : loss_dict = { "training loss": torch.mean( torch.tensor( mse_loss_total)), "gradient loss": torch.mean( torch.tensor( grad_loss_total)) } # log individual loss terms for 
individual fields for idx, cur_loss in enumerate(loss_total) : loss_name = self.cf.losses[idx] lt = torch.tensor(cur_loss) for i, field in enumerate(cf.fields_prediction) : idx_name = loss_name + ', ' + field[0] idx_std_name = 'stddev, ' + field[0] loss_dict[idx_name] = torch.mean( lt[:,i]).cpu().detach() loss_dict[idx_std_name] = torch.mean(torch.cat(std_dev_total[i],0)).cpu().detach() wandb.log( loss_dict ) # console output print('train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:1.5f} : {:1.5f} :: {:1.5f}'.format( epoch, batch_idx, model.len( NetMode.train), 100. * batch_idx/model.len(NetMode.train), torch.mean( torch.tensor( grad_loss_total)), torch.mean(torch.tensor(mse_loss_total)), torch.mean( preds[0][1]) ), flush=True) # save model (use -2 as epoch to indicate latest, stored without epoch specification) # self.save( -2) # reset loss_total = [[] for i in range(len(cf.losses)) ] mse_loss_total = [] grad_loss_total = [] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] ctr += 1 # save gradients if cf.save_grads and cf.with_wandb and (0 == cf.par_rank) : dir_name = './grads/id{}'.format( cf.wandb_id) if not os.path.exists(dir_name): os.makedirs(dir_name) rmsprop_ws = [] for k in range( len(self.optimizer.state_dict()['state']) ) : rmsprop_ws.append(self.optimizer.state_dict()['state'][k]['exp_avg_sq'].mean().unsqueeze(0)) rmsprop_ws = torch.cat( rmsprop_ws) fname = '{}/{}_epoch{}_rmsprop.npy'.format( dir_name, cf.wandb_id, epoch) np.save( fname, rmsprop_ws.cpu().detach().numpy() ) idx = 0 for name, param in self.model.named_parameters(): if param.requires_grad : fname = '{}/{}_epoch{}_{:05d}_{}_grad.npy'.format( dir_name, cf.wandb_id, epoch, idx,name) np.save( fname, param.grad.cpu().detach().numpy() ) idx += 1 # clean memory self.optimizer.zero_grad() del batch_data, loss, loss_total, mse_loss_total, grad_loss_total, std_dev_total ################################################### def profile( self): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() # See https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html # for details on how to load and analyse report # https://pytorch.org/blog/trace-analysis-for-masses/ # do for all par_ranks to avoid that they run out of sync print( '---------------------------------') print( 'Profiling:') pname = './logs/profile_par_rank' + str(cf.par_rank) + '_' + cf.wandb_id + '/profile' with torch.profiler.profile( activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(pname), profile_memory=True, record_shapes=True, with_stack=True) as prof: for batch_idx in range( 2 * (1+1+3) ) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() # loss.backward() # self.optimizer.step() prof.step() print( 'Profiling finished.') print( '---------------------------------') ################################################### def validate( self, epoch, BERT_test_strategy = 'BERT'): cf = self.cf BERT_strategy_train = cf.BERT_strategy cf.BERT_strategy = BERT_test_strategy self.model.mode( NetMode.test) total_loss = 0. 
total_losses = torch.zeros( len(self.fields_prediction_idx) ) test_len = 0 self.mode_test = True # run in training mode offset = 0 if -1 == epoch and 0 == cf.par_rank : if 1 == cf.num_accs_per_task : # bug in torchinfo; fixed in v1.8.0 offset += 1 print( 'Network size:') batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) torchinfo.summary( self.model, input_data=[batch_data]) # run test set evaluation with torch.no_grad() : for it in range( self.model.len( NetMode.test) - offset) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : if type(batch_data[1][0][0]) is list : targets = [batch_data[1][i][0][0] for i in range( len(batch_data[1]))] else : targets = batch_data[1][0] # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) loss = torch.tensor( 0.) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] # hook for custom test loss self.test_loss( pred, target) # base line loss cur_loss = self.MSELoss( pred[0], target = target ).cpu().item() loss += cur_loss total_losses[ifield] += cur_loss ifield += 1 total_loss += loss test_len += 1 # store detailed results on current test set for book keeping if cf.par_rank < cf.log_test_num_ranks : log_preds = [[p.detach().clone().cpu() for p in pred] for pred in preds] self.log_validate( epoch, it, log_sources, log_preds) if cf.attention: self.log_attention( epoch, it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes total_loss /= test_len * len(self.cf.fields_prediction) total_losses /= test_len if cf.with_ddp : total_loss_cuda = total_loss.cuda() total_losses_cuda = total_losses.cuda() dist.all_reduce( total_loss_cuda, op=torch.distributed.ReduceOp.AVG ) dist.all_reduce( total_losses_cuda, op=torch.distributed.ReduceOp.AVG ) total_loss = total_loss_cuda.cpu() total_losses = total_losses_cuda.cpu() if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) 
with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entroy loss for continuous distributions if 'stats' in self.cf.losses : stats_loss = Gaussian( target, pred[0], pred[1]) diff = (stats_loss-1.) 
# stats_loss = 0.01 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) ) stats_loss = torch.mean( diff * diff) + torch.mean( torch.sqrt( torch.abs( pred[1])) ) losses['stats'].append( stats_loss) # Generalized cross entroy loss for continuous distributions if 'stats_area' in self.cf.losses : diff = torch.abs( torch.special.erf( (target - pred[0]) / (pred[1] * pred[1])) ) stats_area = 0.2 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) ) losses['stats_area'].append( stats_area) # CRPS score if 'crps' in self.cf.losses : crps_loss = torch.mean( CRPS( target, pred[0], pred[1])) losses['crps'].append( crps_loss) loss = torch.tensor( 0., device=self.device_out) for key in losses : # print( 'LOSS : {} :: {}'.format( key, losses[key])) for ifield, val in enumerate(losses[key]) : loss += self.loss_weights[ifield] * val.to( self.device_out) loss /= len(self.cf.fields_prediction) * len( self.cf.losses) mse_loss = mse_loss_total / len(self.cf.fields_prediction) return loss, mse_loss, losses #################################################################################################### class Trainer_BERT( Trainer_Base) : ################################################### def __init__( self, cf, devices) : Trainer_Base.__init__( self, cf, devices) self.rng_seed = cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) # TODO: generate only rngs that are needed ll = len(cf.fields) * 8 #len(cf.vertical_levels) if cf.BERT_fields_synced : self.rngs = [np.random.default_rng(self.rng_seed) for _ in range(ll)] else : self.rngs = [np.random.default_rng(self.rng_seed+i) for i in range(ll)] # batch preprocessing to be done in loader (mainly for performance reasons since it's # parallelized there) self.pre_batch = functools.partial( prepare_batch_BERT_multifield, self.cf, self.rngs, self.cf.fields, self.cf.BERT_strategy ) ################################################### def prepare_batch( self, xin) : '''Move data to device and some additional final preprocessing before model eval''' cf = self.cf devs = self.devices # unpack loader output # xin[0] since BERT does not have targets (sources, token_infos, targets, fields_tokens_masked_idx,fields_tokens_masked_idx_list) = xin[0] # network input batch_data = [ ( sources[i].to( devs[ cf.fields[i][1][3] ], non_blocking=True), self.tok_infos_trans(token_infos[i]).to( self.devices[0], non_blocking=True)) for i in range(len(sources)) ] # store token number since BERT selects sub-cube (optionally) self.num_tokens = [] for field_idx in range(len(batch_data)) : self.num_tokens.append( list(batch_data[field_idx][0].shape[2:5])) # target self.targets = [] for ifield in self.fields_prediction_idx : self.targets.append( targets[ifield].to( devs[cf.fields[ifield][1][3]], non_blocking=True )) # idxs of masked tokens tmi_out = [[] for _ in range(len(fields_tokens_masked_idx))] for i,tmi in enumerate(fields_tokens_masked_idx) : tmi_out[i] = [tmi_l.to( devs[cf.fields[i][1][3]], non_blocking=True) for tmi_l in tmi] self.tokens_masked_idx = tmi_out # idxs of masked tokens per batch entry self.fields_tokens_masked_idx_list = fields_tokens_masked_idx_list # learnable class token (cannot be done in the data loader since this is running in parallel) if cf.learnable_mask : for ifield, (source, _) in enumerate(batch_data) : source = torch.flatten( torch.flatten( torch.flatten( source, 1, 4), 2, 4), 0, 1) assert len(cf.fields[ifield][2]) == 1 tmidx = self.tokens_masked_idx[ifield][0] source[ tmidx ] = 
self.model.net.masks[ifield].to( source.device) return batch_data ################################################### def encoder_to_decoder( self, embeds_layers) : return ([embeds_layers[i][-1] for i in range(len(embeds_layers))] , embeds_layers ) ################################################### def decoder_to_tail( self, idx_pred, pred) : '''Positional encoding of masked tokens for tail network evaluation''' field_idx = self.fields_prediction_idx[idx_pred] dev = self.devices[ self.cf.fields[field_idx][1][3] ] target_idx = self.tokens_masked_idx[field_idx] assert len(target_idx) > 0, 'no masked tokens but target variable' # select "fixed" masked tokens for loss computation # recover vertical level dimension num_tokens = self.num_tokens[field_idx] num_vlevels = len(self.cf.fields[field_idx][2]) # flatten token dimensions: remove space-time separation pred = torch.flatten( pred, 2, 3).to( dev) # extract masked token level by level pred_masked = [] for lidx, level in enumerate(self.cf.fields[field_idx][2]) : # select masked tokens, flattened along batch dimension for easier indexing and processing pred_l = torch.flatten( pred[:,lidx], 0, 1) pred_masked_l = pred_l[ target_idx[lidx] ] target_idx_l = target_idx[lidx] # add positional encoding of masked tokens # # TODO: do we need the positional encoding? # compute space time indices of all tokens target_idxs_v = level * torch.ones( target_idx_l.shape[0], device=dev) num_tokens_space = num_tokens[1] * num_tokens[2] # remove offset introduced by linearization target_idx_l = torch.remainder( target_idx_l, np.prod(num_tokens)) target_idxs_t = (target_idx_l / num_tokens_space).int() temp = torch.remainder( target_idx_l, num_tokens_space) target_idxs_x = (temp / num_tokens[1]).int() target_idxs_y = torch.remainder( temp, num_tokens[2]) # apply harmonic positional encoding dim_embed = pred.shape[-1] pe = torch.zeros( pred_masked_l.shape[0], dim_embed, device=dev) xs = (2. * np.pi / dim_embed) * torch.arange( 0, dim_embed, 2, device=dev) pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * target_idxs_x, xs) ) \ + torch.sin( torch.outer( target_idxs_t, xs) ) pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * target_idxs_y, xs) ) \ + torch.cos( torch.outer( target_idxs_v, xs) ) # TODO: with or without final positional encoding? # pred_masked.append( pred_masked_l + pe) pred_masked.append( pred_masked_l) # flatten along level dimension, for loss evaluation we effectively have level, batch, ... 
# as ordering of dimensions pred_masked = torch.cat( pred_masked, 0) return pred_masked ################################################### def log_validate( self, epoch, bidx, log_sources, log_preds) : '''Hook for logging: output associated with concrete training strategy.''' if not hasattr( self.cf, 'wandb_id') : return if 'forecast' == self.cf.BERT_strategy : self.log_validate_forecast( epoch, bidx, log_sources, log_preds) elif 'BERT' == self.cf.BERT_strategy : self.log_validate_BERT( epoch, bidx, log_sources, log_preds) else : assert False ################################################### def log_validate_forecast( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=forecast.''' cf = self.cf detok = utils.detokenize # TODO, TODO: for 6h forecast we need to iterate over predicted token slices # save source: remains identical so just save ones (sources, token_infos, targets, _, _) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] # reconstruct geo-coords (identical for all fields) forecast_num_tokens = 1 if hasattr( cf, 'forecast_num_tokens') : forecast_num_tokens = cf.forecast_num_tokens num_tokens = cf.fields[0][3] token_size = cf.fields[0][4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) lats, lons = [ ], [ ] for tinfo in token_infos[0] : lat_min, lat_max = tinfo[0][4], tinfo[ num_tokens[1]*num_tokens[2]-1 ][4] lon_min, lon_max = tinfo[0][5], tinfo[ num_tokens[1]*num_tokens[2]-1 ][5] res = tinfo[0][-1] lat = torch.arange( lat_min - lat_d_h*res, lat_max + lat_d_h*res + 0.001, res) if lon_max < lon_min : lon = torch.arange( lon_min - lon_d_h*res, 360. + lon_max + lon_d_h*res + 0.001, res) else : lon = torch.arange( lon_min - lon_d_h*res, lon_max + lon_d_h*res + 0.001, res) lats.append( lat.numpy()) lons.append( torch.remainder( lon, 360.).numpy()) # check that last token (bottom right corner) has the expected coords # assert np.allclose( ) # extract dates for each token entry, constant for each batch and field dates_t = [] for b_token_infos in token_infos[0] : dates_t.append(utils.token_info_to_time(b_token_infos[0])-pd.Timedelta(hours=token_size[0]-1)) # TODO: check that last token matches first one # process input fields for fidx, field_info in enumerate(cf.fields) : # reshape from tokens to contiguous physical field num_levels = len(field_info[2]) source = detok( sources[fidx].cpu().detach().numpy()) # recover tokenized shape target = detok( targets[fidx].cpu().detach().numpy().reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # TODO: check that geo-coords match to general ones that have been pre-determined for bidx in range(token_infos[fidx].shape[0]) : for vidx, _ in enumerate(field_info[2]) : denormalize = self.model.normalizer( fidx, vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] source[bidx,vidx] = denormalize( date.year, date.month, source[bidx,vidx], coords) target[bidx,vidx] = denormalize( date.year, date.month, target[bidx,vidx], coords) # append sources_out.append( [field_info[0], source]) targets_out.append( [field_info[0], target]) # process predicted fields for fidx, fn in enumerate(cf.fields_prediction) : # field_info = cf.fields[ self.fields_prediction_idx[fidx] ] num_levels = len(field_info[2]) # predictions pred = log_preds[fidx][0].cpu().detach().numpy() pred = detok( pred.reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # ensemble ensemble = 
log_preds[fidx][2].cpu().detach().numpy() ensemble = detok( ensemble.reshape( [ -1, cf.net_tail_num_nets, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ]) ) # denormalize for bidx in range(token_infos[fidx].shape[0]) : for vidx, vl in enumerate(field_info[2]) : denormalize = self.model.normalizer( self.fields_prediction_idx[fidx], vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] pred[bidx,vidx] = denormalize( date.year, date.month, pred[bidx,vidx], coords) ensemble[bidx,:,vidx] = denormalize(date.year, date.month, ensemble[bidx,:,vidx], coords) # append preds_out.append( [fn[0], pred]) ensembles_out.append( [fn[0], ensemble]) # generate time range dates_sources, dates_targets = [ ], [ ] for bidx in range( source.shape[0]) : r = pd.date_range( start=dates_t[bidx], periods=source.shape[2], freq='h') dates_sources.append( r.to_pydatetime().astype( 'datetime64[s]') ) dates_targets.append( dates_sources[-1][ -forecast_num_tokens*token_size[0] : ] ) levels = np.array(cf.fields[0][2]) lats = [90.-lat for lat in lats] write_forecast( cf.wandb_id, epoch, batch_idx, levels, sources_out, [dates_sources, lats, lons], targets_out, [dates_targets, lats, lons], preds_out, ensembles_out ) ################################################### def log_validate_BERT( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=BERT.''' cf = self.cf detok = utils.detokenize # save source: remains identical so just save ones (sources, token_infos, targets, tokens_masked_idx, tokens_masked_idx_list) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] sources_dates_out, sources_lats_out, sources_lons_out = [ ], [ ], [ ] targets_dates_out, targets_lats_out, targets_lons_out = [ ], [ ], [ ] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) res = tinfos[0,0,0,0,0][-1].item() batch_size = tinfos.shape[0] sources_b = detok( sources[fidx].numpy()) if is_predicted : # split according to levels lens_levels = [t.shape[0] for t in tokens_masked_idx[fidx]] targets_b = torch.split( targets[fidx], lens_levels) preds_mu_b = torch.split( log_preds[fidx][0], lens_levels) preds_ens_b = torch.split( log_preds[fidx][2], lens_levels) # split according to batch lens_batches = [ [bv.shape[0] for bv in b] for b in tokens_masked_idx_list[fidx] ] targets_b = [torch.split( targets_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_mu_b = [torch.split(preds_mu_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_ens_b =[torch.split(preds_ens_b[vidx],lens) for vidx,lens in enumerate(lens_batches)] # recover token shape targets_b = [[targets_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_mu_b = [[preds_mu_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_ens_b = [[preds_ens_b[vidx][bidx].reshape( [-1, cf.net_tail_num_nets, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] # for all batch items coords_b = [] for bidx, tinfo in enumerate(tinfos) : # use first vertical levels since a column is considered lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, 
tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res) lons = np.remainder( lons, 360.) # time stamp in token_infos is at start time so needs to be advanced by token_size[0]-1 s = utils.token_info_to_time( tinfo[0,0,0,0,:3] ) - pd.Timedelta(hours=token_size[0]-1) e = utils.token_info_to_time( tinfo[0,-1,0,0,:3] ) dates = pd.date_range( start=s, end=e, freq='h') # target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) 
dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields] write_BERT( cf.wandb_id, epoch, batch_idx, levels, sources_out, [sources_dates_out, sources_lats_out, sources_lons_out], targets_out, [targets_dates_out, targets_lats_out, targets_lons_out], preds_out, ensembles_out ) def log_attention( self, epoch, bidx, log) : '''Hook for logging: output attention maps.''' cf = self.cf attention, token_infos = log attn_dates_out, attn_lats_out, attn_lons_out = [ ], [ ], [ ] attn_out = [] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) coords_b = [] for tinfo in tinfos : # use first vertical levels since a column is considered res = tinfo[0,0,0,0,-1] lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res*token_size[1]) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res*token_size[2]), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res*token_size[2]) lons = np.remainder( lons, 360.) dates = np.array([(utils.token_info_to_time(tinfo[0,t,0,0,:3])) for t in range(tinfo.shape[1])], dtype='datetime64[s]') coords_b += [ [dates, lats, lons] ] if is_predicted: attn_out.append([field_info[0], attention[fidx]]) attn_dates_out.append([c[0] for c in coords_b]) attn_lats_out.append( [c[1] for c in coords_b]) attn_lons_out.append( [c[2] for c in coords_b]) else: attn_dates_out.append( [] ) attn_lats_out.append( [] ) attn_lons_out.append( [] ) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
write_attention(cf.wandb_id, epoch,
12
2023-10-09 19:42:46+00:00
24k
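The longitude handling in log_validate_forecast above wraps token windows that cross the 0/360 meridian before building the coordinate grid. A minimal sketch of that branch; the values of lon_min, lon_max, res, and lon_d_h below are assumed for illustration and do not come from the record.

import torch

# Assumed example inputs; in the source they come from token_infos.
lon_min, lon_max, res, lon_d_h = 350.0, 10.0, 2.5, 1

if lon_max < lon_min:  # window crosses the 0/360 meridian
    lon = torch.arange(lon_min - lon_d_h * res, 360. + lon_max + lon_d_h * res + 0.001, res)
else:
    lon = torch.arange(lon_min - lon_d_h * res, lon_max + lon_d_h * res + 0.001, res)

# Wrap back into [0, 360): 347.5, 350.0, ..., 357.5, 0.0, 2.5, ...
lon = torch.remainder(lon, 360.)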
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n...
import os
import time
import numpy as np
import torch
import torch.multiprocessing
import torch.multiprocessing as mp
from src import config
from src.Mapper import Mapper
from src.Tracker import Tracker
from src.utils.datasets import get_dataset
from src.utils.Logger import Logger
from src.utils.Mesher import Mesher
from src.utils.Renderer import Renderer
20,617
# import src.fusion as fusion
# import open3d as o3d

torch.multiprocessing.set_sharing_strategy('file_system')


class DF_Prior():
    """
    DF_Prior main class.
    Mainly allocate shared resources, and dispatch mapping and tracking process.
    """

    def __init__(self, cfg, args):
        self.cfg = cfg
        self.args = args

        self.occupancy = cfg['occupancy']
        self.low_gpu_mem = cfg['low_gpu_mem']
        self.verbose = cfg['verbose']
        self.dataset = cfg['dataset']
        if args.output is None:
            self.output = cfg['data']['output']
        else:
            self.output = args.output
        self.ckptsdir = os.path.join(self.output, 'ckpts')
        os.makedirs(self.output, exist_ok=True)
        os.makedirs(self.ckptsdir, exist_ok=True)
        os.makedirs(f'{self.output}/mesh', exist_ok=True)
        self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][
            'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy']
        self.update_cam()

        model = config.get_model(cfg)
        self.shared_decoders = model

        self.scale = cfg['scale']

        self.load_bound(cfg)
        self.load_pretrain(cfg)
        self.grid_init(cfg)

        # need to use spawn
        try:
            mp.set_start_method('spawn', force=True)
        except RuntimeError:
            pass

        self.frame_reader = get_dataset(cfg, args, self.scale)
        self.n_img = len(self.frame_reader)
        self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4))
        self.estimate_c2w_list.share_memory_()

        dataset = self.cfg['data']['dataset']
        scene_id = self.cfg['data']['id']
        self.scene_id = scene_id
        print(scene_id)

        # load tsdf grid
        if dataset == 'scannet':
            self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt')
        elif dataset == 'replica':
            self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt')
        self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device'])
        self.tsdf_volume_shared.share_memory_()

        # load tsdf grid bound
        if dataset == 'scannet':
            self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt')
        elif dataset == 'replica':
            self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt')
        self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device'])
        self.tsdf_bnds.share_memory_()

        self.vol_bnds = self.tsdf_bnds
        self.vol_bnds.share_memory_()

        self.gt_c2w_list = torch.zeros((self.n_img, 4, 4))
        self.gt_c2w_list.share_memory_()
        self.idx = torch.zeros((1)).int()
        self.idx.share_memory_()
        self.mapping_first_frame = torch.zeros((1)).int()
        self.mapping_first_frame.share_memory_()
        # the id of the newest frame Mapper is processing
        self.mapping_idx = torch.zeros((1)).int()
        self.mapping_idx.share_memory_()
        self.mapping_cnt = torch.zeros((1)).int()  # counter for mapping
        self.mapping_cnt.share_memory_()
        for key, val in self.shared_c.items():
            val = val.to(self.cfg['mapping']['device'])
            val.share_memory_()
            self.shared_c[key] = val
        self.shared_decoders = self.shared_decoders.to(
            self.cfg['mapping']['device'])
        self.shared_decoders.share_memory()
        self.renderer = Renderer(cfg, args, self)
# import src.fusion as fusion
# import open3d as o3d

torch.multiprocessing.set_sharing_strategy('file_system')


class DF_Prior():
    """
    DF_Prior main class.
    Mainly allocate shared resources, and dispatch mapping and tracking process.
    """

    def __init__(self, cfg, args):
        self.cfg = cfg
        self.args = args

        self.occupancy = cfg['occupancy']
        self.low_gpu_mem = cfg['low_gpu_mem']
        self.verbose = cfg['verbose']
        self.dataset = cfg['dataset']
        if args.output is None:
            self.output = cfg['data']['output']
        else:
            self.output = args.output
        self.ckptsdir = os.path.join(self.output, 'ckpts')
        os.makedirs(self.output, exist_ok=True)
        os.makedirs(self.ckptsdir, exist_ok=True)
        os.makedirs(f'{self.output}/mesh', exist_ok=True)
        self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][
            'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy']
        self.update_cam()

        model = config.get_model(cfg)
        self.shared_decoders = model

        self.scale = cfg['scale']

        self.load_bound(cfg)
        self.load_pretrain(cfg)
        self.grid_init(cfg)

        # need to use spawn
        try:
            mp.set_start_method('spawn', force=True)
        except RuntimeError:
            pass

        self.frame_reader = get_dataset(cfg, args, self.scale)
        self.n_img = len(self.frame_reader)
        self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4))
        self.estimate_c2w_list.share_memory_()

        dataset = self.cfg['data']['dataset']
        scene_id = self.cfg['data']['id']
        self.scene_id = scene_id
        print(scene_id)

        # load tsdf grid
        if dataset == 'scannet':
            self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt')
        elif dataset == 'replica':
            self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt')
        self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device'])
        self.tsdf_volume_shared.share_memory_()

        # load tsdf grid bound
        if dataset == 'scannet':
            self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt')
        elif dataset == 'replica':
            self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt')
        self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device'])
        self.tsdf_bnds.share_memory_()

        self.vol_bnds = self.tsdf_bnds
        self.vol_bnds.share_memory_()

        self.gt_c2w_list = torch.zeros((self.n_img, 4, 4))
        self.gt_c2w_list.share_memory_()
        self.idx = torch.zeros((1)).int()
        self.idx.share_memory_()
        self.mapping_first_frame = torch.zeros((1)).int()
        self.mapping_first_frame.share_memory_()
        # the id of the newest frame Mapper is processing
        self.mapping_idx = torch.zeros((1)).int()
        self.mapping_idx.share_memory_()
        self.mapping_cnt = torch.zeros((1)).int()  # counter for mapping
        self.mapping_cnt.share_memory_()
        for key, val in self.shared_c.items():
            val = val.to(self.cfg['mapping']['device'])
            val.share_memory_()
            self.shared_c[key] = val
        self.shared_decoders = self.shared_decoders.to(
            self.cfg['mapping']['device'])
        self.shared_decoders.share_memory()
        self.renderer = Renderer(cfg, args, self)
self.mesher = Mesher(cfg, args, self)
5
2023-10-13 00:49:57+00:00
24k
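DF_Prior.__init__ above relies on torch.multiprocessing shared memory so that the mapping and tracking processes see the same pose and TSDF tensors. A self-contained sketch of that mechanism; the worker function and tensor here are illustrative, not part of the repo.

import torch
import torch.multiprocessing as mp

def worker(t):
    t += 1  # the child writes into the same shared storage

if __name__ == "__main__":
    mp.set_start_method('spawn', force=True)  # same start method as DF_Prior
    t = torch.zeros(1)
    t.share_memory_()  # move the tensor's storage into shared memory
    p = mp.Process(target=worker, args=(t,))
    p.start()
    p.join()
    print(t)  # tensor([1.]) -- the parent sees the child's write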
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_ranking.py
[ { "identifier": "UndefinedMetricWarning", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/exceptions.py", "snippet": "class UndefinedMetricWarning(UserWarning):\n \"\"\"Warning used when the metric is invalid\n\n .. versionchanged:: 0.18\n Moved from sklearn.base.\n \"\"\"" },...
import warnings
import numpy as np
from functools import partial
from numbers import Integral, Real
from scipy.sparse import csr_matrix, issparse
from scipy.stats import rankdata
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils import (
    assert_all_finite,
    check_array,
    check_consistent_length,
    column_or_1d,
)
from ..utils._encode import _encode, _unique
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import stable_cumsum
from ..utils.fixes import trapezoid
from ..utils.multiclass import type_of_target
from ..utils.sparsefuncs import count_nonzero
from ..utils.validation import _check_pos_label_consistency, _check_sample_weight
from ._base import _average_binary_score, _average_multiclass_ovo_score
17,755
"Target scores need to be probabilities for multiclass " "roc_auc, i.e. they should sum up to 1.0 over classes" ) # validation for multiclass parameter specifications average_options = ("macro", "weighted", None) if multi_class == "ovr": average_options = ("micro",) + average_options if average not in average_options: raise ValueError( "average must be one of {0} for multiclass problems".format(average_options) ) multiclass_options = ("ovo", "ovr") if multi_class not in multiclass_options: raise ValueError( "multi_class='{0}' is not supported " "for multiclass ROC AUC, multi_class must be " "in {1}".format(multi_class, multiclass_options) ) if average is None and multi_class == "ovo": raise NotImplementedError( "average=None is not implemented for multi_class='ovo'." ) if labels is not None: labels = column_or_1d(labels) classes = _unique(labels) if len(classes) != len(labels): raise ValueError("Parameter 'labels' must be unique") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered") if len(classes) != y_score.shape[1]: raise ValueError( "Number of given labels, {0}, not equal to the number " "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) ) if len(np.setdiff1d(y_true, classes)): raise ValueError("'y_true' contains labels not in parameter 'labels'") else: classes = _unique(y_true) if len(classes) != y_score.shape[1]: raise ValueError( "Number of classes in y_true not equal to the number of " "columns in 'y_score'" ) if multi_class == "ovo": if sample_weight is not None: raise ValueError( "sample_weight is not supported " "for multiclass one-vs-one ROC AUC, " "'sample_weight' must be None in this case." ) y_true_encoded = _encode(y_true, uniques=classes) # Hand & Till (2001) implementation (ovo) return _average_multiclass_ovo_score( _binary_roc_auc_score, y_true_encoded, y_score, average=average ) else: # ovr is same as multi-label y_true_multilabel = label_binarize(y_true, classes=classes) return _average_binary_score( _binary_roc_auc_score, y_true_multilabel, y_score, average, sample_weight=sample_weight, ) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. Parameters ---------- y_true : ndarray of shape (n_samples,) True targets of binary classification. y_score : ndarray of shape (n_samples,) Estimated probabilities or output of a decision function. pos_label : int, float, bool or str, default=None The label of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fps : ndarray of shape (n_thresholds,) A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : ndarray of shape (n_thresholds,) An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : ndarray of shape (n_thresholds,) Decreasing score values. 
""" # Check to make sure y_true is valid y_type = type_of_target(y_true, input_name="y_true") if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): raise ValueError("{0} format is not supported".format(y_type)) check_consistent_length(y_true, y_score, sample_weight) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) # Filter out zero-weighted samples, as they should not impact the result if sample_weight is not None: sample_weight = column_or_1d(sample_weight)
"""Metrics to assess performance on classification task given scores. Functions named as ``*_score`` return a scalar value to maximize: the higher the better. Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better. """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # Michal Karbownik <michakarbownik@gmail.com> # License: BSD 3 clause @validate_params( {"x": ["array-like"], "y": ["array-like"]}, prefer_skip_nested_validation=True, ) def auc(x, y): """Compute Area Under the Curve (AUC) using the trapezoidal rule. This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. For an alternative way to summarize a precision-recall curve, see :func:`average_precision_score`. Parameters ---------- x : array-like of shape (n,) X coordinates. These must be either monotonic increasing or monotonic decreasing. y : array-like of shape (n,) Y coordinates. Returns ------- auc : float Area Under the Curve. See Also -------- roc_auc_score : Compute the area under the ROC curve. average_precision_score : Compute average precision from prediction scores. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError( "At least 2 points are needed to compute area under curve, but x.shape = %s" % x.shape ) direction = 1 dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("x is neither increasing nor decreasing : {}.".format(x)) area = direction * trapezoid(y, x) if isinstance(area, np.memmap): # Reductions such as .sum used internally in trapezoid do not return a # scalar by default for numpy.memmap instances contrary to # regular numpy.ndarray instances. area = area.dtype.type(area) return area @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "samples", "weighted", "macro"}), None], "pos_label": [Real, str, "boolean"], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def average_precision_score( y_true, y_score, *, average="macro", pos_label=1, sample_weight=None ): """Compute average precision (AP) from prediction scores. AP summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold, with the increase in recall from the previous threshold used as the weight: .. math:: \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n where :math:`P_n` and :math:`R_n` are the precision and recall at the nth threshold [1]_. This implementation is not interpolated and is different from computing the area under the precision-recall curve with the trapezoidal rule, which uses linear interpolation and can be too optimistic. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True binary labels or binary label indicators. 
y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by :term:`decision_function` on some classifiers). average : {'micro', 'samples', 'weighted', 'macro'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. pos_label : int, float, bool or str, default=1 The label of the positive class. Only applied to binary ``y_true``. For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- average_precision : float Average precision score. See Also -------- roc_auc_score : Compute the area under the ROC curve. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Notes ----- .. versionchanged:: 0.19 Instead of linearly interpolating between operating points, precisions are weighted by the change in recall since the last operating point. References ---------- .. [1] `Wikipedia entry for the Average precision <https://en.wikipedia.org/w/index.php?title=Information_retrieval& oldid=793358396#Average_precision>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) 0.83... >>> y_true = np.array([0, 0, 1, 1, 2, 2]) >>> y_scores = np.array([ ... [0.7, 0.2, 0.1], ... [0.4, 0.3, 0.3], ... [0.1, 0.8, 0.1], ... [0.2, 0.3, 0.5], ... [0.4, 0.4, 0.2], ... [0.1, 0.2, 0.7], ... ]) >>> average_precision_score(y_true, y_scores) 0.77... """ def _binary_uninterpolated_average_precision( y_true, y_score, pos_label=1, sample_weight=None ): precision, recall, _ = precision_recall_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Return the step function integral # The following works because the last entry of precision is # guaranteed to be 1, as returned by precision_recall_curve return -np.sum(np.diff(recall) * np.array(precision)[:-1]) y_type = type_of_target(y_true, input_name="y_true") # Convert to Python primitive type to avoid NumPy type / Python str # comparison. See https://github.com/numpy/numpy/issues/6784 present_labels = np.unique(y_true).tolist() if y_type == "binary": if len(present_labels) == 2 and pos_label not in present_labels: raise ValueError( f"pos_label={pos_label} is not a valid label. It should be " f"one of {present_labels}" ) elif y_type == "multilabel-indicator" and pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multilabel-indicator y_true. " "Do not set pos_label or set pos_label to 1." ) elif y_type == "multiclass": if pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multiclass y_true. " "Do not set pos_label or set pos_label to 1." 
) y_true = label_binarize(y_true, classes=present_labels) average_precision = partial( _binary_uninterpolated_average_precision, pos_label=pos_label ) return _average_binary_score( average_precision, y_true, y_score, average, sample_weight=sample_weight ) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def det_curve(y_true, y_score, pos_label=None, sample_weight=None): """Compute error rates for different probability thresholds. .. note:: This metric is used for evaluation of ranking and error tradeoffs of a binary classification task. Read more in the :ref:`User Guide <det_curve>`. .. versionadded:: 0.24 Parameters ---------- y_true : ndarray of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : ndarray of shape of (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fpr : ndarray of shape (n_thresholds,) False positive rate (FPR) such that element i is the false positive rate of predictions with score >= thresholds[i]. This is occasionally referred to as false acceptance probability or fall-out. fnr : ndarray of shape (n_thresholds,) False negative rate (FNR) such that element i is the false negative rate of predictions with score >= thresholds[i]. This is occasionally referred to as false rejection or miss rate. thresholds : ndarray of shape (n_thresholds,) Decreasing score values. See Also -------- DetCurveDisplay.from_estimator : Plot DET curve given an estimator and some data. DetCurveDisplay.from_predictions : Plot DET curve given the true and predicted labels. DetCurveDisplay : DET curve visualization. roc_curve : Compute Receiver operating characteristic (ROC) curve. precision_recall_curve : Compute precision-recall curve. Examples -------- >>> import numpy as np >>> from sklearn.metrics import det_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, fnr, thresholds = det_curve(y_true, y_scores) >>> fpr array([0.5, 0.5, 0. ]) >>> fnr array([0. , 0.5, 0.5]) >>> thresholds array([0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. Detection error " "tradeoff curve is not defined in that case." 
) fns = tps[-1] - tps p_count = tps[-1] n_count = fps[-1] # start with false positives zero first_ind = ( fps.searchsorted(fps[0], side="right") - 1 if fps.searchsorted(fps[0], side="right") > 0 else None ) # stop with false negatives zero last_ind = tps.searchsorted(tps[-1]) + 1 sl = slice(first_ind, last_ind) # reverse the output such that list of false positives is decreasing return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1]) def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None): """Binary roc auc score.""" if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. ROC AUC score " "is not defined in that case." ) fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) if max_fpr is None or max_fpr == 1: return auc(fpr, tpr) if max_fpr <= 0 or max_fpr > 1: raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr) # Add a single point at max_fpr by linear interpolation stop = np.searchsorted(fpr, max_fpr, "right") x_interp = [fpr[stop - 1], fpr[stop]] y_interp = [tpr[stop - 1], tpr[stop]] tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp)) fpr = np.append(fpr[:stop], max_fpr) partial_auc = auc(fpr, tpr) # McClish correction: standardize result to be 0.5 if non-discriminant # and 1 if maximal min_area = 0.5 * max_fpr**2 max_area = max_fpr return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "macro", "samples", "weighted"}), None], "sample_weight": ["array-like", None], "max_fpr": [Interval(Real, 0.0, 1, closed="right"), None], "multi_class": [StrOptions({"raise", "ovr", "ovo"})], "labels": ["array-like", None], }, prefer_skip_nested_validation=True, ) def roc_auc_score( y_true, y_score, *, average="macro", sample_weight=None, max_fpr=None, multi_class="raise", labels=None, ): """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \ from prediction scores. Note: this implementation can be used with binary, multiclass and multilabel classification, but some restrictions apply (see Parameters). Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True labels or binary label indicators. The binary and multiclass cases expect labels with shape (n_samples,) while the multilabel case expects binary label indicators with shape (n_samples, n_classes). y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores. * In the binary case, it corresponds to an array of shape `(n_samples,)`. Both probability estimates and non-thresholded decision values can be provided. The probability estimates correspond to the **probability of the class with the greater label**, i.e. `estimator.classes_[1]` and thus `estimator.predict_proba(X, y)[:, 1]`. The decision values corresponds to the output of `estimator.decision_function(X, y)`. See more information in the :ref:`User guide <roc_auc_binary>`; * In the multiclass case, it corresponds to an array of shape `(n_samples, n_classes)` of probability estimates provided by the `predict_proba` method. The probability estimates **must** sum to 1 across the possible classes. In addition, the order of the class scores must correspond to the order of ``labels``, if provided, or else to the numerical or lexicographical order of the labels in ``y_true``. 
See more information in the :ref:`User guide <roc_auc_multiclass>`; * In the multilabel case, it corresponds to an array of shape `(n_samples, n_classes)`. Probability estimates are provided by the `predict_proba` method and the non-thresholded decision values by the `decision_function` method. The probability estimates correspond to the **probability of the class with the greater label for each output** of the classifier. See more information in the :ref:`User guide <roc_auc_multilabel>`. average : {'micro', 'macro', 'samples', 'weighted'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Note: multiclass ROC AUC currently only handles the 'macro' and 'weighted' averages. For multiclass targets, `average=None` is only implemented for `multi_class='ovr'` and `average='micro'` is only implemented for `multi_class='ovr'`. ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. sample_weight : array-like of shape (n_samples,), default=None Sample weights. max_fpr : float > 0 and <= 1, default=None If not ``None``, the standardized partial AUC [2]_ over the range [0, max_fpr] is returned. For the multiclass case, ``max_fpr``, should be either equal to ``None`` or ``1.0`` as AUC ROC partial computation currently is not supported for multiclass. multi_class : {'raise', 'ovr', 'ovo'}, default='raise' Only used for multiclass targets. Determines the type of configuration to use. The default value raises an error, so either ``'ovr'`` or ``'ovo'`` must be passed explicitly. ``'ovr'``: Stands for One-vs-rest. Computes the AUC of each class against the rest [3]_ [4]_. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when ``average == 'macro'``, because class imbalance affects the composition of each of the 'rest' groupings. ``'ovo'``: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes [5]_. Insensitive to class imbalance when ``average == 'macro'``. labels : array-like of shape (n_classes,), default=None Only used for multiclass targets. List of labels that index the classes in ``y_score``. If ``None``, the numerical or lexicographical order of the labels in ``y_true`` is used. Returns ------- auc : float Area Under the Curve score. See Also -------- average_precision_score : Area under the precision-recall curve. roc_curve : Compute Receiver operating characteristic (ROC) curve. RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] `Analyzing a portion of the ROC curve. McClish, 1989 <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_ .. [3] Provost, F., Domingos, P. (2000). 
Well-trained PETs: Improving probability estimation trees (Section 6.2), CeDER Working Paper #IS-00-04, Stern School of Business, New York University. .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern Recognition Letters, 27(8), 861-874. <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_ .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems. Machine Learning, 45(2), 171-186. <http://link.springer.com/article/10.1023/A:1010920819831>`_ Examples -------- Binary case: >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.metrics import roc_auc_score >>> X, y = load_breast_cancer(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X)[:, 1]) 0.99... >>> roc_auc_score(y, clf.decision_function(X)) 0.99... Multiclass case: >>> from sklearn.datasets import load_iris >>> X, y = load_iris(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear").fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr') 0.99... Multilabel case: >>> import numpy as np >>> from sklearn.datasets import make_multilabel_classification >>> from sklearn.multioutput import MultiOutputClassifier >>> X, y = make_multilabel_classification(random_state=0) >>> clf = MultiOutputClassifier(clf).fit(X, y) >>> # get a list of n_output containing probability arrays of shape >>> # (n_samples, n_classes) >>> y_pred = clf.predict_proba(X) >>> # extract the positive columns for each output >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred]) >>> roc_auc_score(y, y_pred, average=None) array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...]) >>> from sklearn.linear_model import RidgeClassifierCV >>> clf = RidgeClassifierCV().fit(X, y) >>> roc_auc_score(y, clf.decision_function(X), average=None) array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...]) """ y_type = type_of_target(y_true, input_name="y_true") y_true = check_array(y_true, ensure_2d=False, dtype=None) y_score = check_array(y_score, ensure_2d=False) if y_type == "multiclass" or ( y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2 ): # do not support partial ROC computation for multiclass if max_fpr is not None and max_fpr != 1.0: raise ValueError( "Partial AUC computation not available in " "multiclass setting, 'max_fpr' must be" " set to `None`, received `max_fpr={0}` " "instead".format(max_fpr) ) if multi_class == "raise": raise ValueError("multi_class must be in ('ovo', 'ovr')") return _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ) elif y_type == "binary": labels = np.unique(y_true) y_true = label_binarize(y_true, classes=labels)[:, 0] return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) else: # multilabel-indicator return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) def _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ): """Multiclass roc auc score. Parameters ---------- y_true : array-like of shape (n_samples,) True multiclass labels. 
y_score : array-like of shape (n_samples, n_classes) Target scores corresponding to probability estimates of a sample belonging to a particular class labels : array-like of shape (n_classes,) or None List of labels to index ``y_score`` used for multiclass. If ``None``, the lexical order of ``y_true`` is used to index ``y_score``. multi_class : {'ovr', 'ovo'} Determines the type of multiclass configuration to use. ``'ovr'``: Calculate metrics for the multiclass case using the one-vs-rest approach. ``'ovo'``: Calculate metrics for the multiclass case using the one-vs-one approach. average : {'micro', 'macro', 'weighted'} Determines the type of averaging performed on the pairwise binary metric scores ``'micro'``: Calculate metrics for the binarized-raveled classes. Only supported for `multi_class='ovr'`. .. versionadded:: 1.2 ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. Classes are assumed to be uniformly distributed. ``'weighted'``: Calculate metrics for each label, taking into account the prevalence of the classes. sample_weight : array-like of shape (n_samples,) or None Sample weights. """ # validation of the input y_score if not np.allclose(1, y_score.sum(axis=1)): raise ValueError( "Target scores need to be probabilities for multiclass " "roc_auc, i.e. they should sum up to 1.0 over classes" ) # validation for multiclass parameter specifications average_options = ("macro", "weighted", None) if multi_class == "ovr": average_options = ("micro",) + average_options if average not in average_options: raise ValueError( "average must be one of {0} for multiclass problems".format(average_options) ) multiclass_options = ("ovo", "ovr") if multi_class not in multiclass_options: raise ValueError( "multi_class='{0}' is not supported " "for multiclass ROC AUC, multi_class must be " "in {1}".format(multi_class, multiclass_options) ) if average is None and multi_class == "ovo": raise NotImplementedError( "average=None is not implemented for multi_class='ovo'." ) if labels is not None: labels = column_or_1d(labels) classes = _unique(labels) if len(classes) != len(labels): raise ValueError("Parameter 'labels' must be unique") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered") if len(classes) != y_score.shape[1]: raise ValueError( "Number of given labels, {0}, not equal to the number " "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) ) if len(np.setdiff1d(y_true, classes)): raise ValueError("'y_true' contains labels not in parameter 'labels'") else: classes = _unique(y_true) if len(classes) != y_score.shape[1]: raise ValueError( "Number of classes in y_true not equal to the number of " "columns in 'y_score'" ) if multi_class == "ovo": if sample_weight is not None: raise ValueError( "sample_weight is not supported " "for multiclass one-vs-one ROC AUC, " "'sample_weight' must be None in this case." ) y_true_encoded = _encode(y_true, uniques=classes) # Hand & Till (2001) implementation (ovo) return _average_multiclass_ovo_score( _binary_roc_auc_score, y_true_encoded, y_score, average=average ) else: # ovr is same as multi-label y_true_multilabel = label_binarize(y_true, classes=classes) return _average_binary_score( _binary_roc_auc_score, y_true_multilabel, y_score, average, sample_weight=sample_weight, ) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. 
Parameters ---------- y_true : ndarray of shape (n_samples,) True targets of binary classification. y_score : ndarray of shape (n_samples,) Estimated probabilities or output of a decision function. pos_label : int, float, bool or str, default=None The label of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fps : ndarray of shape (n_thresholds,) A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : ndarray of shape (n_thresholds,) An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : ndarray of shape (n_thresholds,) Decreasing score values. """ # Check to make sure y_true is valid y_type = type_of_target(y_true, input_name="y_true") if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): raise ValueError("{0} format is not supported".format(y_type)) check_consistent_length(y_true, y_score, sample_weight) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) # Filter out zero-weighted samples, as they should not impact the result if sample_weight is not None: sample_weight = column_or_1d(sample_weight)
sample_weight = _check_sample_weight(sample_weight, y_true)
16
2023-10-07 13:19:48+00:00
24k
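For the binary branch of roc_auc_score shown above, the metric reduces to auc applied to roc_curve output. A small usage check with a known result:

import numpy as np
from sklearn.metrics import auc, roc_auc_score, roc_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, _ = roc_curve(y_true, y_score)
print(auc(fpr, tpr))                   # 0.75 (trapezoidal rule over the ROC curve)
print(roc_auc_score(y_true, y_score))  # 0.75 -- the two paths agree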
zbzhu99/madiff
diffuser/models/diffusion.py
[ { "identifier": "DPM_Solver", "path": "diffuser/utils/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding...
import functools
import numpy as np
import torch
import torch.nn.functional as F
import diffuser.utils as utils
from torch import nn
from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper
from .helpers import Losses, apply_conditioning, cosine_beta_schedule, extract
20,349
class GaussianDiffusion(nn.Module):
    def __init__(
        self,
        model,
        n_agents,
        horizon,
        history_horizon,
        observation_dim,
        action_dim,
        n_timesteps=1000,
        loss_type="l1",
        clip_denoised=False,
        predict_epsilon=True,
        action_weight=1.0,
        loss_discount=1.0,
        loss_weights=None,
        returns_condition=False,
        condition_guidance_w=0.1,
        agent_share_noise=False,
        data_encoder=utils.IdentityEncoder(),
        **kwargs,
    ):
        super().__init__()
        self.n_agents = n_agents
        self.horizon = horizon
        self.history_horizon = history_horizon
        self.observation_dim = observation_dim
        self.action_dim = action_dim
        self.transition_dim = observation_dim + action_dim
        self.model = model
        self.returns_condition = returns_condition
        self.condition_guidance_w = condition_guidance_w
        self.agent_share_noise = agent_share_noise
        self.data_encoder = data_encoder
class GaussianDiffusion(nn.Module):
    def __init__(
        self,
        model,
        n_agents,
        horizon,
        history_horizon,
        observation_dim,
        action_dim,
        n_timesteps=1000,
        loss_type="l1",
        clip_denoised=False,
        predict_epsilon=True,
        action_weight=1.0,
        loss_discount=1.0,
        loss_weights=None,
        returns_condition=False,
        condition_guidance_w=0.1,
        agent_share_noise=False,
        data_encoder=utils.IdentityEncoder(),
        **kwargs,
    ):
        super().__init__()
        self.n_agents = n_agents
        self.horizon = horizon
        self.history_horizon = history_horizon
        self.observation_dim = observation_dim
        self.action_dim = action_dim
        self.transition_dim = observation_dim + action_dim
        self.model = model
        self.returns_condition = returns_condition
        self.condition_guidance_w = condition_guidance_w
        self.agent_share_noise = agent_share_noise
        self.data_encoder = data_encoder
betas = cosine_beta_schedule(n_timesteps)
3
2023-10-13 13:03:53+00:00
24k
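The record's next line calls cosine_beta_schedule(n_timesteps), imported from .helpers. Below is a sketch of the common cosine schedule from Nichol & Dhariwal (2021); the repo's own helper may differ in dtype or clipping details.

import numpy as np

def cosine_beta_schedule(timesteps, s=0.008, max_beta=0.999):
    # alpha_bar(t) follows a squared cosine; betas are its stepwise decay.
    steps = np.arange(timesteps + 1, dtype=np.float64)
    alphas_cumprod = np.cos(((steps / timesteps) + s) / (1 + s) * np.pi / 2) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return np.clip(betas, 0, max_beta)

betas = cosine_beta_schedule(1000)
print(betas[0], betas[-1])  # tiny at t=0, capped at max_beta near t=T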
hellloxiaotian/KDNet
train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse
import logging
import math
import os
import random
import time
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
import test  # import test.py to get mAP after each epoch
from copy import deepcopy
from pathlib import Path
from threading import Thread
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models.experimental import attempt_load
from models.experimental import attempt_loadv5
from models.experimental import attempt_load_zxy
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss, ComputeLossOTA
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
from utils.distill_utils import getMask, compute_mask_loss
20,427
# load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] # Freeze freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if hasattr(v, 'im'): if hasattr(v.im, 'implicit'): pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if 
hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] # Freeze freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if hasattr(v, 'im'): if hasattr(v.im, 'implicit'): pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
14
2023-10-08 13:05:58+00:00
24k
falesiani/torch_ga
tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multip...
import unittest as ut
import h5py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
from io import BytesIO
from torch_ga.layers import (
    GeometricProductDense,
    GeometricSandwichProductDense,
    GeometricProductElementwise,
    GeometricSandwichProductElementwise,
    GeometricProductConv1D,
    GeometricAlgebraExp,
    GeometricToTensor,
    GeometricToTensorWithKind,
    TensorToGeometric,
    TensorWithKindToGeometric,
)
from torch_ga.blades import BladeKind
from torch_ga import GeometricAlgebra
16,872
torch.manual_seed(0)


class TestKerasLayers(ut.TestCase):
    def assertTensorsEqual(self, a, b):
        # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
        print(f"assertTensorsEqual(a={a},b={b})")
        assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b)

    def test_tensor_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices)
        self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor)

    def test_tensor_with_kind_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
torch.manual_seed(0)


class TestKerasLayers(ut.TestCase):
    def assertTensorsEqual(self, a, b):
        # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
        print(f"assertTensorsEqual(a={a},b={b})")
        assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b)

    def test_tensor_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices)
        self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor)

    def test_tensor_with_kind_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
tensor_kind_to_geom_layer = TensorWithKindToGeometric(
9
2023-10-07 13:34:07+00:00
24k
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os
import pytz
import asyncio
import openai
import requests
from typing import List
from dotenv import load_dotenv
from datetime import datetime, timedelta
from multiprocessing import Process
from streaks.streaks_db import StreaksDB
from team_members.team_member_db import TeamMemberDB
from updates.updates_db import UpdatesDB
from weekly_posts.weekly_posts_db import WeeklyPostsDB
from streaks.streaks_manager import StreaksManager
from team_members.team_member_manager import TeamMemberManager
from updates.updates_manager import UpdatesManager
from weekly_posts.weekly_post_manager import WeeklyPostManager
from scheduler import Scheduler
from team_members.team_member import TeamMember
from discord.ext import commands, tasks
from discord import Intents, DMChannel
from flask import Flask
from asyncio import Task, ensure_future, CancelledError
15,358
if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert 
the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager): if member.weekly_checkins == 5: return # If already completed 5 check-ins, do nothing user = bot.get_user(member.discord_id) if user: # Notify the admin that a status request is being sent admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"Status request sent to {member.name}.") # Cancel the previous task if it exists ongoing_task: Task = ongoing_status_requests.get(member.discord_id) if ongoing_task: ongoing_task.cancel() # Retrieve all commit messages for the member commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member) if not commit_messages: summarized_report = "You have no commits for the previous working day." msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." else: summarized_report = await updates_manager.summarize_technical_updates(commit_messages) msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." raw_updates = summarized_report # Send initial message and wait for reaction await user.send( f"# Good morning {member.name}, time for your daily status update!\n" f"### I'm first going to check your commit messages and try to build a technical report for you.\n" f"### Next I will ask you for any non-technical updates from your previous work day.\n" f"### Finally I will ask you what you plan to work on today." 
) sent_message = await user.send(msg) await sent_message.add_reaction(THUMBS_UP_EMOJI) await sent_message.add_reaction(PENCIL_EMOJI) await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) def check(m) -> bool: return m.author == user and isinstance(m.channel, DMChannel) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, reactor = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await sent_message.remove_reaction(emoji, bot.user) while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: if str(reaction.emoji) == PENCIL_EMOJI: await user.send("What would you like me to change?") # Store the new wait_for message (feedback) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task feedback = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the feedback # Send original + feedback to LLM for reformatting summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content) elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI: await user.send("Please submit your technical report directly.") # Store the new wait_for message (report submission) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task direct_report = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the report summarized_report = direct_report.content break # Exit the while loop as the user has submitted their report directly msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." last_sent_message = await send_long_message(user, msg) if last_sent_message: await last_sent_message.add_reaction(THUMBS_UP_EMOJI) await last_sent_message.add_reaction(PENCIL_EMOJI) await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, user = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await last_sent_message.remove_reaction(emoji, bot.user) # Prompt user for non-technical updates from the previous day non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc." 
await user.send(non_technical_msg_prompt) # Store the new wait_for message (non-technical update) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task non_technical_update_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the non-technical update raw_updates += f"\n\n{non_technical_update_raw.content}" # Summarize non-technical update with LLM non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content) # Prompt user for their goals for the day goals_msg_prompt = "What do you plan to work on or accomplish today?" await user.send(goals_msg_prompt) # Store the new wait_for message (goals for the day) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task goals_for_today_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the goals # Summarize goals for the day with LLM goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content) # Update the streak for this member streak = streaks_manager.get_streak(member.discord_id) streaks_manager.update_streak(member.discord_id, streak + 1) member.update_streak(streaks_manager.get_streak(member.discord_id)) member.increment_weekly_checkins() raw_updates += f"\n\n{goals_for_today_raw.content}" final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}" updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone) updates_manager.update_summarized_status(member.discord_id, final_updates) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) # Member name update as a header member_update_header = f"## {member.name}'s Update:" # Compile the final report with Markdown formatting final_report = ( f"\n### Technical Update:\n" f"{summarized_report}\n" f"### Non-Technical Update:\n" f"{non_technical_update}\n" f"### Goals for Today:\n" f"{goals_for_today}" ) stand_up_feedback = await updates_manager.evaluate_performance(final_report) # Concatenate the member name update with the final report and send to the designated Discord channel complete_message = f"{member_update_header}{final_report}" guild = bot.get_guild(GUILD_TOKEN) channel_to_post_in = guild.get_channel(CHANNEL_TOKEN) await user.send(stand_up_feedback) await send_long_message(channel_to_post_in, complete_message) async def send_long_message(destination, msg): max_length = 2000 # Discord's max character limit for a message sent_messages = [] # Keep track of all messages sent while len(msg) > 0: # If the message is shorter than the max length, send it as is if len(msg) <= max_length: sent_message = await destination.send(msg) sent_messages.append(sent_message) break # The message is sent, so break out of the loop # Find the nearest newline character before the max_length split_index = msg.rfind('\n', 0, max_length) # If no newline is found, just split at max_length if split_index == -1: split_index = max_length # Split the message at the found index and send the first part part_to_send = msg[:split_index].strip() sent_message = await destination.send(part_to_send) sent_messages.append(sent_message) # Wait a bit to respect Discord's rate limits await asyncio.sleep(1) # Remove the part that was sent 
from the message msg = msg[split_index:].strip() # Return the last message sent for reaction addition return sent_messages[-1] if sent_messages else None @bot.command(name='viewscheduledjobs') async def view_scheduled_jobs(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view scheduled jobs.") return # Get all scheduled jobs using the Scheduler's method scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager) # Send the scheduled jobs to the admin user for job in scheduled_jobs: await ctx.send(job) @bot.command(name='statusrequest') async def status_request(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to request status.") return # Find the member object using the Discord ID member_to_request = team_member_manager.find_member(discord_id) if member_to_request: for member in team_member_manager.team_members: scheduler.remove_job(member.discord_id) scheduler.unschedule_weekly_post() # Send the status request to the member await ctx.send(f"Status request sent to user with Discord ID {discord_id}.") for member in team_member_manager.team_members: scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager) scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager) await ctx.send(f"Status request received from user with Discord ID {discord_id}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='adduser') async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to add users.") return # Add the new member using team_member_manager team_member_manager.add_member(discord_id, name, time_zone, github_username) # Update the weekly post to include the new member new_member = team_member_manager.find_member(discord_id) if new_member: await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User {name} added successfully.") @bot.command(name='removeuser') async def remove_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to remove users.") return # Find the member object member_to_remove = team_member_manager.find_member(discord_id) if member_to_remove: # Remove the member from the database team_member_manager.remove_member(discord_id) # Update the weekly post to remove the member await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.remove_job(discord_id) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User with Discord ID {discord_id} removed successfully.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='listusers') async 
def list_users(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to list users.") return # List users using team_member_manager users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members] user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users]) await ctx.send(f"List of users:\n{user_list}") @bot.command(name='updatetimezone') async def update_timezone(ctx, discord_id: int, new_time_zone: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update timezones.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the timezone in the database team_member_manager.update_member_timezone(discord_id, new_time_zone) scheduler.remove_job(discord_id) scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='updatestreak') async def update_streak(ctx, discord_id: int, new_streak: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized 
to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online
streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
0
2023-10-12 02:01:46+00:00
24k
azuline/rose
rose/rules_test.py
[ { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: ...
import dataclasses
import pytest
from pathlib import Path
from typing import Any
from unittest.mock import Mock
from rose.audiotags import AudioTags
from rose.cache import (
    list_releases,
    list_tracks,
    update_cache,
)
from rose.common import Artist
from rose.config import Config
from rose.rule_parser import MetadataMatcher, MetadataRule
from rose.rules import (
    FastSearchResult,
    TrackTagNotAllowedError,
    execute_metadata_rule,
    execute_stored_metadata_rules,
    fast_search_for_matching_releases,
    fast_search_for_matching_tracks,
    filter_release_false_positives_using_read_cache,
    filter_track_false_positives_using_read_cache,
)
16,537
execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Pop", "lalala"] def test_action_on_different_tag(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:A Cool Label", ["genre::replace:hi"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["hi"] def test_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::sed:P:B"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Bop", "Bop"] def test_chained_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse( "label:A Cool Label", [ "replace:Jennie", "label:^Jennie$::replace:Jisoo", "label:nomatch::replace:Rose", "genre::replace:haha", ], ) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["Jisoo"] assert af.genre == ["haha"] @pytest.mark.timeout(2) def test_confirmation_yes(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: True) execute_metadata_rule(config, rule, confirm_yes=True) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.timeout(2) def test_confirmation_no(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: False) execute_metadata_rule(config, rule, confirm_yes=True) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" @pytest.mark.timeout(2) def test_confirmation_count(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.prompt", Mock(side_effect=["no", "8", "6"])) # Abort. execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Success in two arguments. 
execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_dry_run(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) execute_metadata_rule(config, rule, dry_run=True, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" def test_run_stored_rules(config: Config, source_dir: Path) -> None: config = dataclasses.replace( config, stored_metadata_rules=[MetadataRule.parse("tracktitle:Track", ["replace:lalala"])], ) execute_stored_metadata_rules(config) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases(config: Config) -> None: results = fast_search_for_matching_releases( config, MetadataMatcher.parse("albumartist:Techno Man") ) assert results == [FastSearchResult(id="r1", path=config.music_source_dir / "r1")] @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases_invalid_tag(config: Config) -> None: with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("tracktitle:x")) with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("trackartist:x")) # But allow artist tag: fast_search_for_matching_releases(config, MetadataMatcher.parse("artist:x")) @pytest.mark.usefixtures("seeded_cache") def test_filter_release_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("albumartist:^Man") fsresults = fast_search_for_matching_releases(config, matcher) assert len(fsresults) == 2 cacheresults = list_releases(config, [r.id for r in fsresults]) assert len(cacheresults) == 2 filteredresults = filter_release_false_positives_using_read_cache(matcher, cacheresults) assert not filteredresults @pytest.mark.usefixtures("seeded_cache") def test_filter_track_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("trackartist:^Man") fsresults = fast_search_for_matching_tracks(config, matcher) assert len(fsresults) == 3
def test_rules_execution_match_substring(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:bbb", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:rack", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_beginnning(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:^rack", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:^Track", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_end(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:rack$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:rack 1$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_superstrict(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:^Track $", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:^Track 1$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_case_insensitive(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:tRaCk:i", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_fields_match_tracktitle(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "8" def test_rules_fields_match_year(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("year:1990", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.year == 8 def test_rules_fields_match_releasetype(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("releasetype:album", ["replace:live"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.releasetype == "live" def test_rules_fields_match_tracknumber(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracknumber:1", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test 
Release 1" / "01.m4a") assert af.tracknumber == "8" def test_rules_fields_match_tracktotal(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktotal:2", ["tracktitle::replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "8" def test_rules_fields_match_discnumber(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("discnumber:1", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.discnumber == "8" def test_rules_fields_match_disctotal(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("disctotal:1", ["tracktitle::replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "8" def test_rules_fields_match_albumtitle(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("albumtitle:Love Blackpink", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.album == "8" def test_rules_fields_match_genre(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["8", "Pop"] def test_rules_fields_match_label(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:Cool", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["8"] def test_rules_fields_match_albumartist(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("albumartist:BLACKPINK", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.albumartists.main == [Artist("8")] def test_rules_fields_match_trackartist(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("trackartist:BLACKPINK", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.trackartists.main == [Artist("8")] def test_match_backslash(config: Config, source_dir: Path) -> None: af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") af.title = r"X \\ Y" af.flush() update_cache(config) rule = MetadataRule.parse(r"tracktitle: \\\\ ", [r"sed: \\\\\\\\ : / "]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "X / Y" def test_action_replace_with_delimiter(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["replace:Hip-Hop;Rap"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["Hip-Hop", "Rap", "Pop"] def test_action_replace_with_delimiters_empty_str(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::replace:Hip-Hop;;;;"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["Hip-Hop"] def test_sed_action(config: Config, source_dir: Path) -> None: rule = 
MetadataRule.parse("tracktitle:Track", ["sed:ack:ip"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "Trip 1" def test_sed_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:P", [r"matched:::sed:^(.*)$:i\\1"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["iK-Pop", "iPop"] def test_split_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:Cool", ["split:Cool"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["A", "Label"] def test_split_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::split:P"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-", "op"] def test_add_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:Cool", ["add:Even Cooler Label"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["A Cool Label", "Even Cooler Label"] def test_delete_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:^Pop$", ["delete"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Pop"] def test_delete_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:^Pop$", ["matched:::delete"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == [] def test_preserves_unmatched_multitags(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:^Pop$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Pop", "lalala"] def test_action_on_different_tag(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:A Cool Label", ["genre::replace:hi"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["hi"] def test_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::sed:P:B"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Bop", "Bop"] def test_chained_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse( "label:A Cool Label", [ "replace:Jennie", "label:^Jennie$::replace:Jisoo", "label:nomatch::replace:Rose", "genre::replace:haha", ], ) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["Jisoo"] assert af.genre == ["haha"] @pytest.mark.timeout(2) def test_confirmation_yes(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: True) execute_metadata_rule(config, rule, confirm_yes=True) af = 
AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.timeout(2) def test_confirmation_no(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: False) execute_metadata_rule(config, rule, confirm_yes=True) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" @pytest.mark.timeout(2) def test_confirmation_count(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.prompt", Mock(side_effect=["no", "8", "6"])) # Abort. execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Success in two arguments. execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_dry_run(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) execute_metadata_rule(config, rule, dry_run=True, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" def test_run_stored_rules(config: Config, source_dir: Path) -> None: config = dataclasses.replace( config, stored_metadata_rules=[MetadataRule.parse("tracktitle:Track", ["replace:lalala"])], ) execute_stored_metadata_rules(config) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases(config: Config) -> None: results = fast_search_for_matching_releases( config, MetadataMatcher.parse("albumartist:Techno Man") ) assert results == [FastSearchResult(id="r1", path=config.music_source_dir / "r1")] @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases_invalid_tag(config: Config) -> None: with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("tracktitle:x")) with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("trackartist:x")) # But allow artist tag: fast_search_for_matching_releases(config, MetadataMatcher.parse("artist:x")) @pytest.mark.usefixtures("seeded_cache") def test_filter_release_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("albumartist:^Man") fsresults = fast_search_for_matching_releases(config, matcher) assert len(fsresults) == 2 cacheresults = list_releases(config, [r.id for r in fsresults]) assert len(cacheresults) == 2 filteredresults = filter_release_false_positives_using_read_cache(matcher, cacheresults) assert not filteredresults @pytest.mark.usefixtures("seeded_cache") def test_filter_track_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("trackartist:^Man") fsresults = fast_search_for_matching_tracks(config, matcher) assert len(fsresults) == 3
tracks = list_tracks(config, [r.id for r in fsresults])
2
2023-10-09 14:42:23+00:00
24k
zhaoyizhou1123/mbrcsl
examples/roboverse/run_combo_roboverse.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropou...
import argparse
import os
import sys
import random
import datetime
import roboverse
import numpy as np
import torch
from offlinerlkit.nets import MLP
from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian, EnsembleDynamicsModel
from offlinerlkit.dynamics import EnsembleDynamics
from offlinerlkit.utils.scaler import StandardScaler
from offlinerlkit.utils.termination_fns import termination_fn_default
from offlinerlkit.buffer import ReplayBuffer
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import MBPolicyTrainer
from offlinerlkit.policy import COMBOPolicy
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
from offlinerlkit.utils.none_or_str import none_or_str
16,115
return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy") task_data_path = os.path.join(args.data_dir, "pickplace_task.npy") dataset, init_obss_dataset = get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device) critic1 = Critic(critic1_backbone, args.device) critic2 = Critic(critic2_backbone, args.device) actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr) critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr) critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(actor_optim, 
args.epoch) if args.auto_alpha: target_entropy = args.target_entropy if args.target_entropy \ else -np.prod(env.action_space.shape) args.target_entropy = target_entropy log_alpha = torch.zeros(1, requires_grad=True, device=args.device) alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr) alpha = (target_entropy, log_alpha, alpha_optim) else: alpha = args.alpha # create dynamics load_dynamics_model = True if args.load_dynamics_path else False dynamics_model = EnsembleDynamicsModel( obs_dim=np.prod(args.obs_shape), action_dim=args.action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr )
def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo-name", type=str, default="combo") parser.add_argument("--task", type=str, default="pickplace", help="pickplace") # Self-constructed environment parser.add_argument("--last_eval", action="store_false") # env config (pickplace) parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=1.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--uniform-rollout", type=bool, default=False) parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"]) parser.add_argument("--dynamics-lr", type=float, default=1e-3) parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n-ensemble", type=int, default=7) parser.add_argument("--n-elites", type=int, default=5) parser.add_argument("--rollout-freq", type=int, default=1000) parser.add_argument("--rollout-batch-size", type=int, default=50000) parser.add_argument("--rollout-length", type=int, default=5) parser.add_argument("--model-retain-epochs", type=int, default=5) parser.add_argument("--real-ratio", type=float, default=0.5) parser.add_argument("--load-dynamics-path", type=none_or_str, default=None) parser.add_argument("--epoch", type=int, default=200) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy") task_data_path = os.path.join(args.data_dir, "pickplace_task.npy") dataset, init_obss_dataset = 
get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device) critic1 = Critic(critic1_backbone, args.device) critic2 = Critic(critic2_backbone, args.device) actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr) critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr) critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(actor_optim, args.epoch) if args.auto_alpha: target_entropy = args.target_entropy if args.target_entropy \ else -np.prod(env.action_space.shape) args.target_entropy = target_entropy log_alpha = torch.zeros(1, requires_grad=True, device=args.device) alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr) alpha = (target_entropy, log_alpha, alpha_optim) else: alpha = args.alpha # create dynamics load_dynamics_model = True if args.load_dynamics_path else False dynamics_model = EnsembleDynamicsModel( obs_dim=np.prod(args.obs_shape), action_dim=args.action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) 
dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr )
scaler = StandardScaler()
6
2023-10-11 08:36:06+00:00
24k
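Note on the record above: the argument parser passes type=none_or_str for --load-dynamics-path, a helper that is referenced but not shown in the snippet. A minimal sketch of what such an argparse converter typically looks like, under the usual idiom (the repository's actual definition may differ):

def none_or_str(value):
    # argparse type converter: treat the literal string "None" as Python None,
    # so `--load-dynamics-path None` disables loading a pretrained dynamics model
    if value == "None":
        return None
    return value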
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sa...
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
14,675
print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval() classifier_model.train = disabled_train ddim_steps = cfg.ddim_steps ddim_eta = cfg.ddim_eta scale = cfg.scale #for unconditional guidance strength = cfg.strength #for unconditional guidance sampler = CCMDDIMSampler(model, classifier_model, seg_model= None, classifier_wrapper="classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper, record_intermediate_results=cfg.record_intermediate_results, verbose=cfg.verbose, **cfg.sampler) sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False) assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]' t_enc = int(strength * len(sampler.ddim_timesteps)) assert len(sampler.ddim_timesteps) == ddim_steps, "ddim_steps should be equal to len(sampler.ddim_timesteps)" n_samples_per_class = cfg.n_samples_per_class batch_size = cfg.data.batch_size shuffle = cfg.get("shuffle", False) #save config to the output directory #check if the config file already exists else create a config file config_path = os.path.join(out_dir, "config.yaml") if os.path.exists(config_path): print("config file already exists! skipping ...") else: with open(os.path.join(out_dir, "config.yaml"), 'w') as f: print("saving config to ", os.path.join(out_dir, "config.yaml ...")) yaml.dump(config, f) os.chmod(os.path.join(out_dir, "config.yaml"), 0o555) #data_path = cfg.data_path dataset = get_dataset(cfg, last_data_idx=last_data_idx) print("dataset length: ", len(dataset)) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1) if "ImageNet" in cfg.data._target_: i2h = name_map elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] if 31 == cfg.data.query_label: i2h = ["no smile", "smile"] elif 39 == cfg.data.query_label: i2h = ["old", "young"] else: raise NotImplementedError elif "Flowers102" in cfg.data._target_: with open("data/flowers_idx_to_label.json", "r") as f: flowers_idx_to_classname = json.load(f) flowers_idx_to_classname = {int(k)-1: v for k, v in flowers_idx_to_classname.items()} i2h = flowers_idx_to_classname elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) i2h = {int(k): v for k, v in pets_idx_to_classname.items()} else: raise NotImplementedError if "ImageNet" in cfg.data._target_: with open('data/synset_closest_idx.yaml', 'r') as file: synset_closest_idx = yaml.safe_load(file) elif "Flowers102" in cfg.data._target_: with open("data/flowers_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} if not cfg.resume: torch.save({"last_data_idx": -1}, checkpoint_path) seed = cfg.seed if "seed" in cfg else 0 
set_seed(seed=seed) for i, batch in enumerate(data_loader): if "fixed_seed" in cfg: set_seed(seed=cfg.get("seed", 0)) if cfg.fixed_seed else None seed = seed if cfg.fixed_seed else -1 if "return_tgt_cls" in cfg.data and cfg.data.return_tgt_cls: image, label, tgt_classes, unique_data_idx = batch tgt_classes = tgt_classes.to(device) #squeeze() else: image, label, unique_data_idx = batch if "ImageNet" in cfg.data._target_: tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) elif "CelebAHQDataset" in cfg.data._target_: tgt_classes = (1 - label).type(torch.float32) elif "Flowers102" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_: tgt_classes = torch.tensor([closest_indices[unique_data_idx[l].item()*cfg.data.num_shards + cfg.data.shard][0] for l in range(label.shape[0])]).to(device) else: raise NotImplementedError image = image.to(device) #squeeze() label = label.to(device) #.item() #squeeze() #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #tgt_classes = synset_closest_idx[label] #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #shuffle tgt_classes #random.shuffle(tgt_classes) #get classification prediction with torch.inference_mode(): #with precision_scope(): if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: logits = classifier_model(image) else:
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: import open_clip except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device) classifier_model = DINOLinear(dino, linear_classifier) transforms_list = [transforms.CenterCrop(224), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))] classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) elif "OxfordIIIPets" in cfg.data._target_: # zero-shot OpenClip: https://arxiv.org/pdf/2212.07143.pdf model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') model = model.to(device).eval() tokenizer = open_clip.get_tokenizer('ViT-B-32') # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) prompts = [f"a photo of a {label}, a type of pet."
for label in pets_idx_to_classname.values()] classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! 
starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval() classifier_model.train = disabled_train ddim_steps = cfg.ddim_steps ddim_eta = cfg.ddim_eta scale = cfg.scale #for unconditional guidance strength = cfg.strength #for unconditional guidance sampler = CCMDDIMSampler(model, classifier_model, seg_model= None, classifier_wrapper="classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper, record_intermediate_results=cfg.record_intermediate_results, verbose=cfg.verbose, **cfg.sampler) sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False) assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]' t_enc = int(strength * len(sampler.ddim_timesteps)) assert len(sampler.ddim_timesteps) == ddim_steps, "ddim_steps should be equal to len(sampler.ddim_timesteps)" n_samples_per_class = cfg.n_samples_per_class batch_size = cfg.data.batch_size shuffle = cfg.get("shuffle", False) #save config to the output directory #check if the config file already exists else create a config file config_path = os.path.join(out_dir, "config.yaml") if os.path.exists(config_path): print("config file already exists! skipping ...") else: with open(os.path.join(out_dir, "config.yaml"), 'w') as f: print("saving config to ", os.path.join(out_dir, "config.yaml ...")) yaml.dump(config, f) os.chmod(os.path.join(out_dir, "config.yaml"), 0o555) #data_path = cfg.data_path dataset = get_dataset(cfg, last_data_idx=last_data_idx) print("dataset length: ", len(dataset)) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1) if "ImageNet" in cfg.data._target_: i2h = name_map elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] if 31 == cfg.data.query_label: i2h = ["no smile", "smile"] elif 39 == cfg.data.query_label: i2h = ["old", "young"] else: raise NotImplementedError elif "Flowers102" in cfg.data._target_: with open("data/flowers_idx_to_label.json", "r") as f: flowers_idx_to_classname = json.load(f) flowers_idx_to_classname = {int(k)-1: v for k, v in flowers_idx_to_classname.items()} i2h = flowers_idx_to_classname elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) i2h = {int(k): v for k, v in pets_idx_to_classname.items()} else: raise NotImplementedError if "ImageNet" in cfg.data._target_: with open('data/synset_closest_idx.yaml', 'r') as file: synset_closest_idx = yaml.safe_load(file) elif "Flowers102" in cfg.data._target_: with open("data/flowers_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_closest_indices.json") as file: closest_indices = 
json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} if not cfg.resume: torch.save({"last_data_idx": -1}, checkpoint_path) seed = cfg.seed if "seed" in cfg else 0 set_seed(seed=seed) for i, batch in enumerate(data_loader): if "fixed_seed" in cfg: set_seed(seed=cfg.get("seed", 0)) if cfg.fixed_seed else None seed = seed if cfg.fixed_seed else -1 if "return_tgt_cls" in cfg.data and cfg.data.return_tgt_cls: image, label, tgt_classes, unique_data_idx = batch tgt_classes = tgt_classes.to(device) #squeeze() else: image, label, unique_data_idx = batch if "ImageNet" in cfg.data._target_: tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) elif "CelebAHQDataset" in cfg.data._target_: tgt_classes = (1 - label).type(torch.float32) elif "Flowers102" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_: tgt_classes = torch.tensor([closest_indices[unique_data_idx[l].item()*cfg.data.num_shards + cfg.data.shard][0] for l in range(label.shape[0])]).to(device) else: raise NotImplementedError image = image.to(device) #squeeze() label = label.to(device) #.item() #squeeze() #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #tgt_classes = synset_closest_idx[label] #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #shuffle tgt_classes #random.shuffle(tgt_classes) #get classification prediction with torch.inference_mode(): #with precision_scope(): if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: logits = classifier_model(image) else:
logits = sampler.get_classifier_logits(_unmap_img(image)) #converting to -1, 1
2
2023-10-10 09:40:10+00:00
24k
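The ldce script in the record above persists progress as a tiny torch checkpoint named last_saved_id.pth that holds only last_data_idx, then resumes from the next batch. A self-contained sketch of that resume pattern (the helpers and the bare path are illustrative; the script stores the file under its output directory):

import os
import torch

CHECKPOINT_PATH = "last_saved_id.pth"  # illustrative; the script joins it onto out_dir

def load_resume_index(path: str = CHECKPOINT_PATH) -> int:
    # Mirrors the logic above: missing file -> start at batch 0,
    # otherwise resume from the batch after the last saved one.
    if not os.path.exists(path):
        return 0
    checkpoint = torch.load(path)
    return checkpoint.get("last_data_idx", -1) + 1

def save_resume_index(last_data_idx: int, path: str = CHECKPOINT_PATH) -> None:
    torch.save({"last_data_idx": last_data_idx}, path)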
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = ...
import argparse import os import random import sys import shutil import cv2 import numpy as np import torch import wandb from importlib.machinery import SourceFileLoader from tqdm import tqdm from datasets.gradslam_datasets import ( load_dataset_config, ICLDataset, ReplicaDataset, AzureKinectDataset, ScannetDataset, Ai2thorDataset, Record3DDataset, RealsenseDataset, TUMDataset, ScannetPPDataset, NeRFCaptureDataset ) from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt from utils.recon_helpers import setup_camera from utils.gs_helpers import ( params2rendervar, params2depthplussilhouette, transformed_params2depthplussilhouette, transform_to_frame, report_progress, eval, l1_loss_v1, matrix_to_quaternion ) from utils.gs_external import ( calc_ssim, build_rotation, densify, get_expon_lr_func, update_learning_rate ) from diff_gaussian_rasterization import GaussianRasterizer as Renderer
20,186
use_train_split=dataset_config["use_train_split"], ) num_frames = dataset_config["num_frames"] if num_frames == -1: num_frames = len(mapping_dataset) eval_num_frames = dataset_config["eval_num_frames"] if eval_num_frames == -1: eval_num_frames = len(eval_dataset) # Initialize Parameters, Optimizer & Canonical Camera parameters ckpt_path = config["data"]["param_ckpt_path"] params, variables, optimizer, intrinsics, w2c, cam = initialize_first_timestep_from_ckpt(ckpt_path,mapping_dataset, num_frames, config['train']['lrs_mapping'], config['mean_sq_dist_method']) _, _, map_intrinsics, _ = mapping_dataset[0] # Load all RGBD frames - Mapping dataloader color_all_frames_map = [] depth_all_frames_map = [] gt_w2c_all_frames_map = [] gs_cams_all_frames_map = [] for time_idx in range(num_frames): color, depth, _, gt_pose = mapping_dataset[time_idx] # Process poses gt_w2c = torch.linalg.inv(gt_pose) # Process RGB-D Data color = color.permute(2, 0, 1) / 255 depth = depth.permute(2, 0, 1) color_all_frames_map.append(color) depth_all_frames_map.append(depth) gt_w2c_all_frames_map.append(gt_w2c) # Setup Gaussian Splatting Camera gs_cam = setup_camera(color.shape[2], color.shape[1], map_intrinsics.cpu().numpy(), gt_w2c.detach().cpu().numpy()) gs_cams_all_frames_map.append(gs_cam) # Iterate over Scan for time_idx in tqdm(range(num_frames)): # Optimization Iterations num_iters_mapping = config['train']['num_iters_mapping'] # Initialize current frame data iter_time_idx = time_idx color = color_all_frames_map[iter_time_idx] depth = depth_all_frames_map[iter_time_idx] curr_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette # if time_idx > 0: # params, variables = add_new_gaussians(params, variables, curr_data, # config['train']['sil_thres'], time_idx, # config['mean_sq_dist_method']) post_num_pts = params['means3D'].shape[0] if config['use_wandb']: wandb_run.log({"Init/Number of Gaussians": post_num_pts, "Init/step": wandb_time_step}) # Reset Optimizer & Learning Rates for Full Map Optimization optimizer = initialize_optimizer(params, config['train']['lrs_mapping']) means3D_scheduler = get_expon_lr_func(lr_init=config['train']['lrs_mapping']['means3D'], lr_final=config['train']['lrs_mapping_means3D_final'], lr_delay_mult=config['train']['lr_delay_mult'], max_steps=config['train']['num_iters_mapping']) # Mapping if (time_idx + 1) == num_frames: if num_iters_mapping > 0: progress_bar = tqdm(range(num_iters_mapping), desc=f"Mapping Time Step: {time_idx}") for iter in range(num_iters_mapping): # Update Learning Rates for means3D updated_lr = update_learning_rate(optimizer, means3D_scheduler, iter+1) if config['use_wandb']: wandb_run.log({"Learning Rate - Means3D": updated_lr}) # Randomly select a frame until current time step iter_time_idx = random.randint(0, time_idx) # Initialize Data for selected frame iter_color = color_all_frames_map[iter_time_idx] iter_depth = depth_all_frames_map[iter_time_idx] iter_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] iter_gs_cam = gs_cams_all_frames_map[iter_time_idx] iter_data = {'cam': iter_gs_cam, 'im': iter_color, 'depth': iter_depth, 'id': iter_time_idx, 'intrinsics': map_intrinsics, 'w2c': gt_w2c_all_frames_map[iter_time_idx], 'iter_gt_w2c_list': iter_gt_w2c} # Loss for current frame loss, variables, losses = get_loss_gs(params, iter_data, variables,
config['train']['loss_weights']) # Backprop loss.backward() with torch.no_grad(): # Gaussian-Splatting's Gradient-based Densification if config['train']['use_gaussian_splatting_densification']: params, variables = densify(params, variables, optimizer, iter, config['train']['densify_dict']) if config['use_wandb']: wandb_run.log({"Number of Gaussians - Densification": params['means3D'].shape[0]}) # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) # Report Progress if config['report_iter_progress']: if config['use_wandb']: report_progress(params, iter_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['train']['sil_thres'], wandb_run=wandb_run, wandb_step=wandb_step, wandb_save_qual=config['wandb']['save_qual'], mapping=True, online_time_idx=time_idx) else: report_progress(params, iter_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['train']['sil_thres'], mapping=True, online_time_idx=time_idx) else: progress_bar.update(1) # Eval Params at 7K Iterations if (iter + 1) == 7000: print("Evaluating Params at 7K Iterations") eval_params = convert_params_to_store(params) output_dir = os.path.join(config["workdir"], config["run_name"]) eval_dir = os.path.join(output_dir, "eval_7k") os.makedirs(eval_dir, exist_ok=True) if config['use_wandb']:
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) print("System Paths:") for p in sys.path: print(p) def get_dataset(config_dict, basedir, sequence, **kwargs): if config_dict["dataset_name"].lower() in ["icl"]: return ICLDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["replica"]: return ReplicaDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]: return AzureKinectDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannet"]: return ScannetDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["ai2thor"]: return Ai2thorDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["record3d"]: return Record3DDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["realsense"]: return RealsenseDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["tum"]: return TUMDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannetpp"]: return ScannetPPDataset(basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["nerfcapture"]: return NeRFCaptureDataset(basedir, sequence, **kwargs) else: raise ValueError(f"Unknown dataset name {config_dict['dataset_name']}") def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, mask=None, compute_mean_sq_dist=False, mean_sq_dist_method="projective"): width, height = color.shape[2], color.shape[1] CX = intrinsics[0][2] CY = intrinsics[1][2] FX = intrinsics[0][0] FY = intrinsics[1][1] # Compute indices of pixels x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), torch.arange(height).cuda().float(), indexing='xy') xx = (x_grid - CX)/FX yy = (y_grid - CY)/FY xx = xx.reshape(-1) yy = yy.reshape(-1) depth_z = depth[0].reshape(-1) # Initialize point cloud pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1) if transform_pts: pix_ones = torch.ones(height * width, 1).cuda().float() pts4 = torch.cat((pts_cam, pix_ones), dim=1) c2w = torch.inverse(w2c) pts = (c2w @ pts4.T).T[:, :3] else: pts = pts_cam # Compute mean squared distance for initializing the scale of the Gaussians if compute_mean_sq_dist: if mean_sq_dist_method == "projective": # Projective Geometry (this is fast, farther -> larger radius) scale_gaussian = depth_z / ((FX + FY)/2) mean3_sq_dist = scale_gaussian**2 else: raise ValueError(f"Unknown mean_sq_dist_method: {mean_sq_dist_method}") # Colorize point cloud cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C) point_cld = torch.cat((pts, cols), -1) # Select points based on mask if mask is not None: point_cld = point_cld[mask] if compute_mean_sq_dist: mean3_sq_dist = mean3_sq_dist[mask] if compute_mean_sq_dist: return point_cld, mean3_sq_dist else: return point_cld def initialize_params(init_pt_cld, num_frames, mean3_sq_dist): num_pts = init_pt_cld.shape[0] means3D = init_pt_cld[:, :3] # [num_gaussians, 3] unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3] logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device="cuda") params = { 'means3D': means3D, 'rgb_colors': init_pt_cld[:, 3:6], 'unnorm_rotations': unnorm_rots, 'logit_opacities': logit_opacities, 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], 
(1, 1)), } # Initialize a single gaussian trajectory to model the camera poses relative to the first frame cam_rots = np.tile([1, 0, 0, 0], (1, 1)) cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames)) params['cam_unnorm_rots'] = cam_rots params['cam_trans'] = np.zeros((1, 3, num_frames)) for k, v in params.items(): # Check if value is already a torch tensor if not isinstance(v, torch.Tensor): params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) else: params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True)) variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float()} return params, variables def initialize_optimizer(params, lrs_dict): lrs = lrs_dict param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()] return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15) def initialize_first_timestep_from_ckpt(ckpt_path,dataset, num_frames, lrs_dict, mean_sq_dist_method): # Get RGB-D Data & Camera Parameters color, depth, intrinsics, pose = dataset[0] # Process RGB-D Data color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W) depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W) # Process Camera Parameters intrinsics = intrinsics[:3, :3] w2c = torch.linalg.inv(pose) # Setup Camera cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), w2c.detach().cpu().numpy()) # Get Initial Point Cloud (PyTorch CUDA Tensor) mask = (depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) # Initialize Parameters & Optimizer from Checkpoint # Load checkpoint print(f"Loading Params") params = dict(np.load(ckpt_path, allow_pickle=True)) variables = {} for k in ['intrinsics', 'w2c', 'org_width', 'org_height', 'gt_w2c_all_frames']: # for k in ['timestep','intrinsics', 'w2c', 'org_width', 'org_height', 'gt_w2c_all_frames']: params.pop(k) print(params.keys()) params = {k: torch.tensor(params[k]).cuda().float().requires_grad_(True) for k in params.keys()} variables['max_2D_radius'] = torch.zeros(params['means3D'].shape[0]).cuda().float() variables['means2D_gradient_accum'] = torch.zeros(params['means3D'].shape[0]).cuda().float() variables['denom'] = torch.zeros(params['means3D'].shape[0]).cuda().float() # variables['timestep'] = torch.zeros(params['means3D'].shape[0]).cuda().float() variables['timestep'] = torch.tensor(params['timestep']).cuda().float() params.pop('timestep') optimizer = initialize_optimizer(params, lrs_dict) # Initialize an estimate of scene radius for Gaussian-Splatting Densification variables['scene_radius'] = torch.max(depth)/2.0 return params, variables, optimizer, intrinsics, w2c, cam def get_loss_gs(params, curr_data, variables, loss_weights): # Initialize Loss Dictionary losses = {} # Initialize Render Variables rendervar = params2rendervar(params) depth_sil_rendervar = params2depthplussilhouette(params, curr_data['w2c']) # RGB Rendering rendervar['means2D'].retain_grad() im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar) variables['means2D'] = rendervar['means2D'] # Gradient only accum from colour render for densification # Depth & Silhouette Rendering depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar) depth = depth_sil[0, :, :].unsqueeze(0) silhouette = depth_sil[1, :, :] # Get invalid Depth Mask valid_depth_mask = 
(curr_data['depth'] != 0.0) depth = depth * valid_depth_mask # RGB Loss losses['im'] = 0.8 * l1_loss_v1(im, curr_data['im']) + 0.2 * (1.0 - calc_ssim(im, curr_data['im'])) # Depth Loss losses['depth'] = l1_loss_v1(depth, curr_data['depth']) weighted_losses = {k: v * loss_weights[k] for k, v in losses.items()} loss = sum(weighted_losses.values()) seen = radius > 0 variables['max_2D_radius'][seen] = torch.max(radius[seen], variables['max_2D_radius'][seen]) variables['seen'] = seen weighted_losses['loss'] = loss return loss, variables, weighted_losses def initialize_new_params(new_pt_cld, mean3_sq_dist): num_pts = new_pt_cld.shape[0] means3D = new_pt_cld[:, :3] # [num_gaussians, 3] unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3] logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device="cuda") params = { 'means3D': means3D, 'rgb_colors': new_pt_cld[:, 3:6], 'unnorm_rotations': unnorm_rots, 'logit_opacities': logit_opacities, 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], (1, 1)), } for k, v in params.items(): # Check if value is already a torch tensor if not isinstance(v, torch.Tensor): params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) else: params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True)) return params def infill_depth(depth, inpaint_radius=1): """ Function to infill Depth for invalid regions Input: depth: Depth Image (Numpy) radius: Radius of the circular neighborhood for infilling Output: depth: Depth Image with invalid regions infilled (Numpy) """ invalid_mask = (depth == 0) invalid_mask = invalid_mask.astype(np.uint8) filled_depth = cv2.inpaint(depth, invalid_mask, inpaint_radius, cv2.INPAINT_NS) return filled_depth def add_new_gaussians(params, variables, curr_data, sil_thres, time_idx, mean_sq_dist_method): # Silhouette Rendering transformed_pts = transform_to_frame(params, time_idx, gaussians_grad=False, camera_grad=False) depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'], transformed_pts) depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar) silhouette = depth_sil[1, :, :] non_presence_sil_mask = (silhouette < sil_thres) # Check for new foreground objects by using GT depth gt_depth = curr_data['depth'][0, :, :] render_depth = depth_sil[0, :, :] depth_error = torch.abs(gt_depth - render_depth) * (gt_depth > 0) non_presence_depth_mask = (render_depth > gt_depth) * (depth_error > 50*depth_error.median()) # Determine non-presence mask non_presence_mask = non_presence_sil_mask | non_presence_depth_mask # Infill Depth for invalid regions of GT Depth infilled_gt_depth = infill_depth(curr_data['depth'][0, :, :].detach().cpu().numpy()) infilled_gt_depth = torch.tensor(infilled_gt_depth).cuda().float().unsqueeze(0) # Flatten mask non_presence_mask = non_presence_mask.reshape(-1) # Get the new frame Gaussians based on the Silhouette if torch.sum(non_presence_mask) > 0: # Get the new pointcloud in the world frame curr_cam_rot = torch.nn.functional.normalize(params['cam_unnorm_rots'][..., time_idx].detach()) curr_cam_tran = params['cam_trans'][..., time_idx].detach() curr_w2c = torch.eye(4).cuda().float() curr_w2c[:3, :3] = build_rotation(curr_cam_rot) curr_w2c[:3, 3] = curr_cam_tran valid_depth_mask = (infilled_gt_depth > 0) non_presence_mask = non_presence_mask & valid_depth_mask.reshape(-1) new_pt_cld, mean3_sq_dist = get_pointcloud(curr_data['im'], infilled_gt_depth, 
curr_data['intrinsics'], curr_w2c, mask=non_presence_mask, compute_mean_sq_dist=True, mean_sq_dist_method=mean_sq_dist_method) new_params = initialize_new_params(new_pt_cld, mean3_sq_dist) for k, v in new_params.items(): params[k] = torch.nn.Parameter(torch.cat((params[k], v), dim=0).requires_grad_(True)) num_pts = params['means3D'].shape[0] variables['means2D_gradient_accum'] = torch.zeros(num_pts, device="cuda").float() variables['denom'] = torch.zeros(num_pts, device="cuda").float() variables['max_2D_radius'] = torch.zeros(num_pts, device="cuda").float() new_timestep = time_idx*torch.ones(new_pt_cld.shape[0],device="cuda").float() variables['timestep'] = torch.cat((variables['timestep'],new_timestep),dim=0) return params, variables def convert_params_to_store(params): params_to_store = {} for k, v in params.items(): if isinstance(v, torch.Tensor): params_to_store[k] = v.detach().clone() else: params_to_store[k] = v return params_to_store def rgbd_slam(config: dict): # Print Config print("Loaded Config:") print(f"{config}") # Init WandB if config['use_wandb']: wandb_step = 0 wandb_time_step = 0 wandb_run = wandb.init(project=config['wandb']['project'], entity=config['wandb']['entity'], group=config['wandb']['group'], name=config['wandb']['name'], config=config) wandb_run.define_metric("Mapping_Iters") wandb_run.define_metric("Number of Gaussians - Densification", step_metric="Mapping_Iters") wandb_run.define_metric("Learning Rate - Means3D", step_metric="Mapping_Iters") # Get Device device = torch.device(config["primary_device"]) # Load Dataset print("Loading Dataset ...") dataset_config = config["data"] if "gradslam_data_cfg" not in dataset_config: gradslam_data_cfg = {} gradslam_data_cfg["dataset_name"] = dataset_config["dataset_name"] else: gradslam_data_cfg = load_dataset_config(dataset_config["gradslam_data_cfg"]) if "ignore_bad" not in dataset_config: dataset_config["ignore_bad"] = False if "use_train_split" not in dataset_config: dataset_config["use_train_split"] = True # Poses are relative to the first frame mapping_dataset = get_dataset( config_dict=gradslam_data_cfg, basedir=dataset_config["basedir"], sequence=os.path.basename(dataset_config["sequence"]), start=dataset_config["start"], end=dataset_config["end"], stride=dataset_config["stride"], desired_height=dataset_config["desired_image_height"], desired_width=dataset_config["desired_image_width"], device=device, relative_pose=True, ignore_bad=dataset_config["ignore_bad"], use_train_split=dataset_config["use_train_split"], ) eval_dataset = get_dataset( config_dict=gradslam_data_cfg, basedir=dataset_config["basedir"], sequence=os.path.basename(dataset_config["sequence"]), start=dataset_config["start"], end=dataset_config["end"], stride=dataset_config["eval_stride"], desired_height=dataset_config["desired_image_height"], desired_width=dataset_config["desired_image_width"], device=device, relative_pose=True, ignore_bad=dataset_config["ignore_bad"], use_train_split=dataset_config["use_train_split"], ) num_frames = dataset_config["num_frames"] if num_frames == -1: num_frames = len(mapping_dataset) eval_num_frames = dataset_config["eval_num_frames"] if eval_num_frames == -1: eval_num_frames = len(eval_dataset) # Initialize Parameters, Optimizer & Canonical Camera parameters ckpt_path = config["data"]["param_ckpt_path"] params, variables, optimizer, intrinsics, w2c, cam = initialize_first_timestep_from_ckpt(ckpt_path,mapping_dataset, num_frames, config['train']['lrs_mapping'], config['mean_sq_dist_method']) _, _, map_intrinsics, _ =
mapping_dataset[0] # Load all RGBD frames - Mapping dataloader color_all_frames_map = [] depth_all_frames_map = [] gt_w2c_all_frames_map = [] gs_cams_all_frames_map = [] for time_idx in range(num_frames): color, depth, _, gt_pose = mapping_dataset[time_idx] # Process poses gt_w2c = torch.linalg.inv(gt_pose) # Process RGB-D Data color = color.permute(2, 0, 1) / 255 depth = depth.permute(2, 0, 1) color_all_frames_map.append(color) depth_all_frames_map.append(depth) gt_w2c_all_frames_map.append(gt_w2c) # Setup Gaussian Splatting Camera gs_cam = setup_camera(color.shape[2], color.shape[1], map_intrinsics.cpu().numpy(), gt_w2c.detach().cpu().numpy()) gs_cams_all_frames_map.append(gs_cam) # Iterate over Scan for time_idx in tqdm(range(num_frames)): # Optimization Iterations num_iters_mapping = config['train']['num_iters_mapping'] # Initialize current frame data iter_time_idx = time_idx color = color_all_frames_map[iter_time_idx] depth = depth_all_frames_map[iter_time_idx] curr_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette # if time_idx > 0: # params, variables = add_new_gaussians(params, variables, curr_data, # config['train']['sil_thres'], time_idx, # config['mean_sq_dist_method']) post_num_pts = params['means3D'].shape[0] if config['use_wandb']: wandb_run.log({"Init/Number of Gaussians": post_num_pts, "Init/step": wandb_time_step}) # Reset Optimizer & Learning Rates for Full Map Optimization optimizer = initialize_optimizer(params, config['train']['lrs_mapping']) means3D_scheduler = get_expon_lr_func(lr_init=config['train']['lrs_mapping']['means3D'], lr_final=config['train']['lrs_mapping_means3D_final'], lr_delay_mult=config['train']['lr_delay_mult'], max_steps=config['train']['num_iters_mapping']) # Mapping if (time_idx + 1) == num_frames: if num_iters_mapping > 0: progress_bar = tqdm(range(num_iters_mapping), desc=f"Mapping Time Step: {time_idx}") for iter in range(num_iters_mapping): # Update Learning Rates for means3D updated_lr = update_learning_rate(optimizer, means3D_scheduler, iter+1) if config['use_wandb']: wandb_run.log({"Learning Rate - Means3D": updated_lr}) # Randomly select a frame until current time step iter_time_idx = random.randint(0, time_idx) # Initialize Data for selected frame iter_color = color_all_frames_map[iter_time_idx] iter_depth = depth_all_frames_map[iter_time_idx] iter_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] iter_gs_cam = gs_cams_all_frames_map[iter_time_idx] iter_data = {'cam': iter_gs_cam, 'im': iter_color, 'depth': iter_depth, 'id': iter_time_idx, 'intrinsics': map_intrinsics, 'w2c': gt_w2c_all_frames_map[iter_time_idx], 'iter_gt_w2c_list': iter_gt_w2c} # Loss for current frame loss, variables, losses = get_loss_gs(params, iter_data, variables, config['train']['loss_weights']) # Backprop loss.backward() with torch.no_grad(): # Gaussian-Splatting's Gradient-based Densification if config['train']['use_gaussian_splatting_densification']: params, variables = densify(params, variables, optimizer, iter, config['train']['densify_dict']) if config['use_wandb']: wandb_run.log({"Number of Gaussians - Densification": params['means3D'].shape[0]}) # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) # Report Progress if config['report_iter_progress']: if config['use_wandb']: report_progress(params, iter_data, iter+1, progress_bar, 
iter_time_idx, sil_thres=config['train']['sil_thres'], wandb_run=wandb_run, wandb_step=wandb_step, wandb_save_qual=config['wandb']['save_qual'], mapping=True, online_time_idx=time_idx) else: report_progress(params, iter_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['train']['sil_thres'], mapping=True, online_time_idx=time_idx) else: progress_bar.update(1) # Eval Params at 7K Iterations if (iter + 1) == 7000: print("Evaluating Params at 7K Iterations") eval_params = convert_params_to_store(params) output_dir = os.path.join(config["workdir"], config["run_name"]) eval_dir = os.path.join(output_dir, "eval_7k") os.makedirs(eval_dir, exist_ok=True) if config['use_wandb']:
eval(eval_dataset, eval_params, eval_num_frames, eval_dir, sil_thres=config['train']['sil_thres'],
22
2023-11-30 20:26:47+00:00
24k
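The get_pointcloud function in the SplaTAM record back-projects a depth map through pinhole intrinsics before optionally transforming the points to world coordinates. A stripped-down sketch of just that back-projection step, omitting color, masking, and the world transform:

import torch

def backproject_depth(depth: torch.Tensor, fx: float, fy: float, cx: float, cy: float) -> torch.Tensor:
    # depth: (H, W) metric depth map; returns (H*W, 3) points in camera coordinates
    H, W = depth.shape
    xs, ys = torch.meshgrid(torch.arange(W, dtype=depth.dtype),
                            torch.arange(H, dtype=depth.dtype), indexing="xy")
    z = depth.reshape(-1)
    x = ((xs - cx) / fx).reshape(-1) * z
    y = ((ys - cy) / fy).reshape(-1) * z
    return torch.stack((x, y, z), dim=-1)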
zhyever/PatchFusion
zoedepth/models/zoedepth_custom/patchfusion.py
[ { "identifier": "DepthModel", "path": "zoedepth/models/depth_model.py", "snippet": "class DepthModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = 'cpu'\n \n def to(self, device) -> nn.Module:\n self.device = device\n return super().to(device...
import itertools import math import copy import torch import torch.nn as nn import numpy as np import matplotlib.pyplot as plt import matplotlib.pyplot as plt import os import torch.distributed as dist import torch.nn.functional as F from zoedepth.models.depth_model import DepthModel from zoedepth.models.base_models.midas import MidasCore from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed from zoedepth.models.layers.dist_layers import ConditionalLogBinomial, ConditionalLogBinomialV2 from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed) from zoedepth.models.model_io import load_state_from_resource from torchvision.transforms import Normalize from torchvision.ops import roi_align as torch_roi_align from zoedepth.utils.misc import generatemask from zoedepth.models.layers.transformer import TransformerDecoderLayer, TransformerEncoderLayer, TransformerEncoder from zoedepth.utils.misc import colorize, colors from zoedepth.models.layers.fusion_network import UNetv1 from zoedepth.models.zoedepth_custom.zoedepth_custom import ZoeDepthCustom
14,798
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li def check_keywords_in_name(name, keywords=()): isin = False for keyword in keywords: if keyword in name: isin = True return isin def get_activation(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = output return hook def get_input(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = input return hook class AttributeDict(dict): def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError: raise AttributeError(key) class PatchFusion(DepthModel): def __init__(self, coarse_model, fine_model, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, sr_ratio=1, raw_depth_shape=(2160, 3840), transform_sample_gt_size=(2160, 3840), representation='', fetch_features=True, sample_feat_level=3, use_hr=False, deform=False, wo_bins=False, baseline=False, condition=True, freeze=False, g2l=False, use_fusion_network=False, use_area_prior=False, unet_version='v1', consistency_training=False, consistency_target='unet_feat', pos_embed=False, **kwargs): """ZoeDepth model. This is the version of ZoeDepth that has a single metric head Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features n_bins (int, optional): Number of bin centers. Defaults to 64. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. 
encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. sr_ratio: sr ratio during infer raw_depth_shape: raw depth shape during infer. times sr_ratio will be the target resolution. Used to sample points during training transform_sample_gt_size: training depth shape # influenced by crop shape which is not included in this pipeline right now representation: I use it to test the "bilap head" and a discarded idea fetch_features: if fetch feats. Default=True """ super().__init__() self.coarse_model = coarse_model self.fine_model = fine_model self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": # default SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") N_MIDAS_OUT = 32 btlnck_features = self.fine_model.core.output_channels[0] num_out_features = self.fine_model.core.output_channels[1:] # all of them are the same self.seed_bin_regressor = SeedBinRegressorLayer( btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li def check_keywords_in_name(name, keywords=()): isin = False for keyword in keywords: if keyword in name: isin = True return isin def get_activation(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = output return hook def get_input(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = input return hook class AttributeDict(dict): def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError: raise AttributeError(key) class PatchFusion(DepthModel): def __init__(self, coarse_model, fine_model, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, sr_ratio=1, raw_depth_shape=(2160, 3840), transform_sample_gt_size=(2160, 3840), representation='', fetch_features=True, sample_feat_level=3, use_hr=False, deform=False, wo_bins=False, baseline=False, condition=True, freeze=False, g2l=False, use_fusion_network=False, use_area_prior=False, unet_version='v1', consistency_training=False, consistency_target='unet_feat', pos_embed=False, **kwargs): """ZoeDepth model. This is the version of ZoeDepth that has a single metric head Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features n_bins (int, optional): Number of bin centers. Defaults to 64. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. 
attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. sr_ratio: sr ratio during infer raw_depth_shape: raw depth shape during infer. times sr_ratio will be the target resolution. Used to sample points during training transform_sample_gt_size: training depth shape # influenced by crop shape which is not included in this pipeline right now representation: I use it to test the "bilap head" and a discarded idea fetch_features: if fetch feats. Default=True """ super().__init__() self.coarse_model = coarse_model self.fine_model = fine_model self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": # default SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") N_MIDAS_OUT = 32 btlnck_features = self.fine_model.core.output_channels[0] num_out_features = self.fine_model.core.output_channels[1:] # all of them are the same self.seed_bin_regressor = SeedBinRegressorLayer( btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
6
2023-12-04 08:43:15+00:00
24k
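The PatchFusion record above defines two forward-hook helpers (get_activation / get_input) that stash intermediate tensors in a shared dict. A minimal sketch of how such a helper is typically wired up; the toy torch model here is an assumption for illustration only:

import torch
import torch.nn as nn

def get_activation(name, bank):
    # forward hooks receive (module, input, output); keep the output around
    def hook(module, input, output):
        bank[name] = output
    return hook

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
bank = {}
handle = model[0].register_forward_hook(get_activation("fc1", bank))

_ = model(torch.randn(2, 8))
print(bank["fc1"].shape)  # torch.Size([2, 16]): the captured intermediate output
handle.remove()           # detach the hook once the features are harvested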
baaivision/GeoDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.models.geometry.geodream_geometry_volume import GeodreamGeometryVolume
from threestudio.utils.typing import *
from pysdf import SDF
18,340
mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: 
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF) or isinstance(other, GeodreamGeometryVolume):
11
2023-12-01 01:59:42+00:00
24k
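The initialize_shape logic in the record above supports analytic sphere and ellipsoid SDF targets besides the pysdf mesh path. A small self-contained check of those two initializers: the sphere case is an exact signed distance, while the ellipsoid case is a pseudo signed distance that is only zero on the surface. Sample points and sizes are illustrative assumptions:

import torch

def sphere_sdf(points, radius=0.5):
    # exact signed distance to a sphere of the given radius
    return (points ** 2).sum(dim=-1, keepdim=True).sqrt() - radius

def ellipsoid_pseudo_sdf(points, size=(0.6, 0.4, 0.3)):
    # pseudo signed distance: zero on the surface, sign correct, magnitude not metric
    size = torch.as_tensor(size)
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

pts = torch.tensor([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [1.0, 0.0, 0.0]])
print(sphere_sdf(pts).squeeze(-1))            # tensor([-0.5000,  0.0000,  0.5000])
print(ellipsoid_pseudo_sdf(pts).squeeze(-1))  # negative inside, positive outside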
horseee/DeepCache
DeepCache/sd/pipeline_text_to_video_zero.py
[ { "identifier": "UNet2DConditionModel", "path": "DeepCache/sd/unet_2d_condition.py", "snippet": "class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n r\"\"\"\n A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a samp...
import copy
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Callable, List, Optional, Union
from torch.nn.functional import grid_sample
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL
from .unet_2d_condition import UNet2DConditionModel
from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput
from diffusers.utils.torch_utils import randn_tensor
16,961
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. """ images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. 
This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents
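create_motion_field above fills a per-frame flow tensor whose displacement grows linearly with the frame index, which is what produces the steady global translation across frames. A compact sketch at a tiny assumed resolution (the pipeline itself uses 512x512):

import torch

def tiny_motion_field(sx, sy, frame_ids):
    # one (2, H, W) flow map per frame; displacement scales with the frame index
    flow = torch.zeros((len(frame_ids), 2, 4, 4))
    for i, fid in enumerate(frame_ids):
        flow[i, 0] = sx * fid  # x-displacement
        flow[i, 1] = sy * fid  # y-displacement
    return flow

flow = tiny_motion_field(12.0, 8.0, frame_ids=[0, 1, 2])
print(flow[2, 0, 0, 0].item(), flow[2, 1, 0, 0].item())  # 24.0 16.0 for the third frame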
def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100):
    samples = []

    while len(samples) < sample_size:
        # Sample from a Gaussian centered at n/2
        sample = int(np.random.normal(loc=n/2, scale=std_dev))

        # Check if the sample is in bounds
        if 1 <= sample < n and sample not in samples:
            samples.append(sample)

    return samples

def sample_from_quad(total_numbers, n_samples, pow=1.2):
    while pow > 1:
        # Generate linearly spaced values between 0 and a max value
        x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1)

        # Raise these values to the power of `pow` to get a non-linear distribution
        indices = np.unique(np.int32(x_values**pow))[:-1]
        if len(indices) == n_samples:
            break
        pow -= 0.02
    if pow <= 1:
        raise ValueError("Cannot find a suitable pow. Please adjust n_samples.")
    return indices, pow

def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2):
    while pow > 1:
        # Generate linearly spaced values around `center`, then bend them with `pow`
        x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1)
        indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]]
        if len(indices) == n_samples:
            break
        pow -= 0.02
    if pow <= 1:
        raise ValueError("Cannot find a suitable pow. Please adjust n_samples or decrease center.")
    return indices, pow

def rearrange_0(tensor, f):
    F, C, H, W = tensor.size()
    tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4))
    return tensor

def rearrange_1(tensor):
    B, C, F, H, W = tensor.size()
    return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W))

def rearrange_3(tensor, f):
    F, D, C = tensor.size()
    return torch.reshape(tensor, (F // f, f, D, C))

def rearrange_4(tensor):
    B, F, D, C = tensor.size()
    return torch.reshape(tensor, (B * F, D, C))

class CrossFrameAttnProcessor:
    """
    Cross frame attention processor. Each frame attends to the first frame.

    Args:
        batch_size: The number that represents the actual batch size, other than the frames.
            For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to
            2, due to classifier-free guidance.
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents
class TextToVideoZeroPipeline(StableDiffusionPipeline):
1
2023-12-01 10:54:04+00:00
24k
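The core trick in both CrossFrameAttnProcessor variants above is the rearrange_3 / rearrange_4 pair: for self-attention, keys and values of every frame are swapped for those of frame 0, so all frames attend to the first one. A minimal sketch with assumed toy tensor sizes:

import torch

batch_size, video_length, seq_len, dim = 2, 3, 4, 8
key = torch.randn(batch_size * video_length, seq_len, dim)  # (batch * frames, seq, dim)

key = key.reshape(batch_size, video_length, seq_len, dim)   # rearrange_3: split out frames
key = key[:, [0] * video_length]                            # every frame -> frame 0's keys
key = key.reshape(batch_size * video_length, seq_len, dim)  # rearrange_4: flatten back

# within one batch element, all frames now share identical keys
assert torch.equal(key[0], key[video_length - 1])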
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
14,932
if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
2
2023-11-27 02:39:39+00:00
24k
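The mesh branch of initialize_shape in this record builds an orthonormal basis from the configured up/front axes and maps mesh vertices into a standard frame. A short sketch of that construction, here with an assumed '+y' up and '+x' front so the rotation is non-trivial:

import numpy as np

dir2vec = {
    "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]),
    "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]),
}
z_ = dir2vec["+y"]          # shape_init_mesh_up (assumed for this example)
x_ = dir2vec["+x"]          # shape_init_mesh_front
y_ = np.cross(z_, x_)       # completes a right-handed basis
std2mesh = np.stack([x_, y_, z_], axis=0).T
mesh2std = np.linalg.inv(std2mesh)

v = np.array([0.2, -0.1, 0.5])  # one mesh vertex, illustrative
print(mesh2std @ v)             # the same vertex in the standard frame: [ 0.2 -0.5 -0.1]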
EricGuo5513/momask-codes
gen_t2m.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n ...
import os
import torch
import torch.nn.functional as F
import numpy as np
from os.path import join as pjoin
from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer
from models.vq.model import RVQVAE, LengthEstimator
from options.eval_option import EvalT2MOptions
from utils.get_opt import get_opt
from utils.fixseed import fixseed
from visualization.joints2bvh import Joint2BVHConvertor
from torch.distributions.categorical import Categorical
from utils.motion_process import recover_from_ric
from utils.plot_script import plot_3d_motion
from utils.paramUtil import t2m_kinematic_chain
16,497
clip_version = 'ViT-B/32' def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, vq_opt.dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location='cpu') model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location='cpu') model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt, vq_opt, opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer def load_len_estimator(opt): model = LengthEstimator(512, 50) ckpt = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_estimator', 'model', 'finest.tar'), map_location=opt.device) model.load_state_dict(ckpt['estimator']) print(f'Loading Length Estimator from epoch {ckpt["epoch"]}!') return model if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse()
clip_version = 'ViT-B/32' def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, vq_opt.dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location='cpu') model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location='cpu') model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt, vq_opt, opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer def load_len_estimator(opt): model = LengthEstimator(512, 50) ckpt = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_estimator', 'model', 'finest.tar'), map_location=opt.device) model.load_state_dict(ckpt['estimator']) print(f'Loading Length Estimator from epoch {ckpt["epoch"]}!') return model if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse()
fixseed(opt.seed)
6
2023-11-29 19:21:27+00:00
24k
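load_trans_model and load_res_model above share a checkpoint-loading idiom: load with strict=False, then assert that the only missing keys belong to the CLIP text encoder, which is rebuilt from clip_version rather than stored in the checkpoint. A toy reproduction of that pattern (the model and checkpoint here are assumptions):

import torch.nn as nn

model = nn.ModuleDict({
    "clip_model": nn.ModuleDict({"proj": nn.Linear(4, 4)}),
    "head": nn.Linear(4, 2),
})
# a checkpoint that deliberately omits the CLIP weights
ckpt = {k: v for k, v in model.state_dict().items() if not k.startswith("clip_model.")}

missing, unexpected = model.load_state_dict(ckpt, strict=False)
assert len(unexpected) == 0
assert all(k.startswith("clip_model.") for k in missing)
print("missing (expected, CLIP is rebuilt separately):", missing)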
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/models/transformer_temporal.py
[ { "identifier": "ConfigMixin", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [...
from dataclasses import dataclass
from typing import Optional
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
import torch
20,040
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    The output of [`TransformerTemporalModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    The output of [`TransformerTemporalModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
0
2023-11-27 18:46:55+00:00
24k
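TransformerTemporalModelOutput above folds the frame axis into the batch axis, so sample has shape (batch_size * num_frames, channels, height, width). A plain-dataclass sketch of that convention, standing in for diffusers' BaseOutput (which additionally behaves like an ordered dict):

from dataclasses import dataclass
import torch

@dataclass
class TemporalOutputSketch:
    sample: torch.FloatTensor  # (batch * num_frames, C, H, W)

batch, frames, C, H, W = 2, 8, 4, 16, 16
out = TemporalOutputSketch(sample=torch.zeros(batch * frames, C, H, W))
per_frame = out.sample.reshape(batch, frames, C, H, W)  # recover the frame axis
print(per_frame.shape)  # torch.Size([2, 8, 4, 16, 16])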
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_na...
from matplotlib import pyplot as plt
from pytorch3d.transforms import matrix_to_axis_angle
from tqdm import tqdm
from transforms3d.euler import euler2mat
from omegaconf import OmegaConf
from lib_data.get_data import prepare_real_seq
from lib_data.data_provider import DatabasePoseProvider
from lib_gart.templates import get_template
from lib_gart.model import GaussianTemplateModel, AdditionalBones
from lib_gart.optim_utils import *
from lib_render.gauspl_renderer import render_cam_pcl
from lib_gart.model_utils import transform_mu_frame
from utils.misc import *
from utils.viz import viz_render
from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle
from pytorch3d.ops import knn_points
from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender
from viz_utils import viz_spinning, viz_human_all, viz_dog_all
from utils.ssim import ssim
from datetime import datetime
from test_utils import test
from lib_guidance.mvdream.mvdream_guidance import MVDream
from utils.lpips import LPIPS
import imageio
import torch
import numpy as np
import os, os.path as osp, shutil, sys
import time
import logging
import argparse
20,848
self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED)
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED)
template = get_template(
2
2023-11-27 17:30:04+00:00
24k
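TGFitter.__init__ above copies every OmegaConf entry onto the solver as an attribute and expands an integer RESET_OPACITY_STEPS period into the explicit list of step indices where it fires. A compact sketch of those two moves; the inline config values are assumptions:

from types import SimpleNamespace
from omegaconf import OmegaConf

cfg = OmegaConf.create({"TOTAL_steps": 10, "RESET_OPACITY_STEPS": 3})

solver = SimpleNamespace()
for k, v in cfg.items():  # auto-set every config entry as an attribute
    setattr(solver, k, v)

if isinstance(solver.RESET_OPACITY_STEPS, int):
    solver.RESET_OPACITY_STEPS = [
        i for i in range(1, solver.TOTAL_steps)
        if i % solver.RESET_OPACITY_STEPS == 0
    ]
print(solver.RESET_OPACITY_STEPS)  # [3, 6, 9]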
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os
import sys
import numpy as np
import torch
import json
import imageio
import cv2
import random
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
    read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from smpl.smpl_numpy import SMPL
from smplx.body_models import SMPLX
from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
15,383
ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, 
cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try:
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try:
xyz, rgb, _ = read_points3D_binary(bin_path)
5
2023-11-29 07:10:39+00:00
24k
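The getNerfppNorm helper in the record above computes a NeRF++-style scene normalization: stack all camera-to-world centers, take their mean as the scene center, and use 1.1x the distance to the farthest camera as the bounding radius. A minimal numpy sketch of that computation follows, with hypothetical camera centers as input; scene_normalization is an illustrative name, not the repository's.

import numpy as np

def scene_normalization(cam_centers_list):
    """Center on the mean camera position; bound by 1.1x the farthest camera."""
    cam_centers = np.hstack(cam_centers_list)             # (3, N)
    center = np.mean(cam_centers, axis=1, keepdims=True)  # (3, 1)
    dists = np.linalg.norm(cam_centers - center, axis=0)  # (N,)
    radius = float(np.max(dists)) * 1.1                   # 10% safety margin
    return {"translate": -center.flatten(), "radius": radius}

if __name__ == "__main__":
    # two cameras at x = +/- 1: center is the origin, radius is 1.1
    centers = [np.array([[1.0], [0.0], [0.0]]),
               np.array([[-1.0], [0.0], [0.0]])]
    norm = scene_normalization(centers)
    assert np.allclose(norm["translate"], 0.0)
    assert abs(norm["radius"] - 1.1) < 1e-9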
cswry/SeeSR
test_seesr.py
[ { "identifier": "StableDiffusionControlNetPipeline", "path": "pipelines/pipeline_seesr.py", "snippet": "class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.\n\n ...
import os import sys import cv2 import glob import argparse import numpy as np import torch import torch.utils.checkpoint import torch.nn as nn import torch.nn.functional as F from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline from utils.misc import load_dreambooth_lora from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix from ram.models.ram_lora import ram from ram import inference_ram as inference from ram import get_transform from typing import Mapping, Any from torchvision import transforms from torchvision import transforms from models.controlnet import ControlNetModel from models.unet_2d_condition import UNet2DConditionModel
14,797
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline
validation_pipeline = StableDiffusionControlNetPipeline(
0
2023-11-27 08:50:33+00:00
24k
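load_state_dict_diffbirSwinIR in the record above reconciles the "module." prefix that torch.nn.DataParallel adds to checkpoint keys against the target model's key names before calling load_state_dict. A standalone sketch of that reconciliation, using plain dicts in place of real state dicts; align_module_prefix is a hypothetical helper name.

def align_module_prefix(model_keys, state_dict):
    """Rewrite checkpoint keys so their "module." prefix matches the model."""
    model_wrapped = next(iter(model_keys)).startswith("module.")
    ckpt_wrapped = next(iter(state_dict)).startswith("module.")
    if model_wrapped and not ckpt_wrapped:
        # model was wrapped in DataParallel, checkpoint was not: add prefix
        return {f"module.{k}": v for k, v in state_dict.items()}
    if ckpt_wrapped and not model_wrapped:
        # checkpoint came from a wrapped model: strip the prefix
        return {k[len("module."):]: v for k, v in state_dict.items()}
    return state_dict

if __name__ == "__main__":
    ckpt = {"module.conv.weight": 0}
    # plain (unwrapped) model: the prefix is stripped
    assert align_module_prefix(["conv.weight"], ckpt) == {"conv.weight": 0}
    # wrapped model: the checkpoint is passed through unchanged
    assert align_module_prefix(["module.conv.weight"], ckpt) == ckpt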
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n sel...
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
14,969
if 'kd' in result_dict.keys(): save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict 
= validate_itr(glctx, prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad():
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict = validate_itr(glctx, 
prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad():
params = get_camera_params(
1
2023-11-27 13:44:01+00:00
24k
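optimize_mesh in the record above wraps its validation DataLoader in a small cycle() generator so the training loop can pull one validation batch per save interval, forever. Unlike itertools.cycle, which caches the first pass and replays the cached items, this version rebuilds the iterator on StopIteration, so a shuffling DataLoader re-shuffles on every pass. A minimal runnable sketch of that helper:

def cycle(iterable):
    """Yield items endlessly, restarting iteration when the source runs out."""
    iterator = iter(iterable)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(iterable)  # fresh pass (fresh shuffle for a DataLoader)

if __name__ == "__main__":
    stream = cycle([1, 2, 3])
    assert [next(stream) for _ in range(5)] == [1, 2, 3, 1, 2]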
zhenzhiwang/intercontrol
utils/model_util.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std...
import torch from diffusion.control_diffusion import ControlGaussianDiffusion from model.cfg_sampler import wrap_model from model.mdm import MDM from model.ControlMDM import ControlMDM from diffusion import gaussian_diffusion as gd from diffusion.respace import SpacedDiffusion, space_timesteps
15,035
def load_model(args, data, device, ModelClass=MDM): model, diffusion = create_model_and_diffusion(args, data, ModelClass=ModelClass) model_path = args.model_path print(f"Loading checkpoints from [{model_path}]...") state_dict = torch.load(model_path, map_location='cpu') load_model_wo_clip(model, state_dict) model.to(device) model.eval() # disable random masking model = wrap_model(model, args) return model, diffusion def load_model_wo_clip(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if 't_pos_encoder.pe' in missing_keys: missing_keys.remove('t_pos_encoder.pe') if 't_pos_encoder.pe' in unexpected_keys: unexpected_keys.remove('t_pos_encoder.pe') assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) def load_pretrained_mdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def load_pretrained_mdm_to_controlmdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) transformer_encoder_weight = {} for key, value in state_dict.items(): if key.startswith('seqTransEncoder'): transformer_encoder_weight[key[16:]] = value unexpected_keys.remove(key) model.seqTransEncoder_mdm.load_state_dict(transformer_encoder_weight, strict=True) model.seqTransEncoder_control.load_state_dict(transformer_encoder_weight, strict=True) assert len(unexpected_keys) == 0 #assert all([k.startswith('clip_model.') for k in missing_keys]) print("The following parameters are trained from scratch.") for k in missing_keys: if not k.startswith('clip_model.') and not k.startswith('seqTransEncoder'): print(k) def load_split_mdm(model, state_dict, cutting_point): new_state_dict = {} orig_trans_prefix = 'seqTransEncoder.' for k, v in state_dict.items(): if k.startswith(orig_trans_prefix): orig_layer = int(k.split('.')[2]) orig_suffix = '.'.join(k.split('.')[3:]) target_split = 'seqTransEncoder_start.' if orig_layer < cutting_point else 'seqTransEncoder_end.' target_layer = orig_layer if orig_layer < cutting_point else orig_layer - cutting_point new_k = target_split + 'layers.' + str(target_layer) + '.' 
+ orig_suffix new_state_dict[new_k] = v else: new_state_dict[k] = v missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def create_model_and_diffusion(args, data, ModelClass=MDM, DiffusionClass=SpacedDiffusion): model = ModelClass(**get_model_args(args, data)) diffusion = create_gaussian_diffusion(args, DiffusionClass) return model, diffusion def get_model_args(args, data): # default args clip_version = 'ViT-B/32' action_emb = 'tensor' cond_mode = 'text' if args.dataset in ['humanml', 'kit','babel', 'pw3d'] else 'action' if hasattr(data.dataset, 'num_actions'): num_actions = data.dataset.num_actions else: num_actions = 1 # SMPL defaults data_rep = 'rot6d' njoints = 25 nfeats = 6 if args.dataset in ['humanml', 'pw3d']: data_rep = 'hml_vec' njoints = 263 nfeats = 1 elif args.dataset == 'babel': data_rep = 'rot6d' njoints = 135 nfeats = 1 elif args.dataset == 'kit': data_rep = 'hml_vec' njoints = 251 nfeats = 1 else: raise TypeError(f'dataset {args.dataset} is not currently supported') return {'modeltype': '', 'njoints': njoints, 'nfeats': nfeats, 'num_actions': num_actions, 'translation': True, 'pose_rep': 'rot6d', 'glob': True, 'glob_rot': True, 'latent_dim': args.latent_dim, 'ff_size': 1024, 'num_layers': args.layers, 'num_heads': 4, 'dropout': 0.1, 'activation': "gelu", 'data_rep': data_rep, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'action_emb': action_emb, 'arch': args.arch, 'emb_trans_dec': args.emb_trans_dec, 'clip_version': clip_version, 'dataset': args.dataset, 'diffusion-steps': args.diffusion_steps, 'batch_size': args.batch_size, 'use_tta': args.use_tta, 'trans_emb': args.trans_emb, 'concat_trans_emb': args.concat_trans_emb, 'args': args} def create_gaussian_diffusion(args, DiffusionClass=SpacedDiffusion): # default params predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! steps = args.diffusion_steps scale_beta = 1. # no scaling timestep_respacing = '' # can be used for ddim sampling, we don't use it. learn_sigma = False rescale_timesteps = False print(f"number of diffusion-steps: {steps}")
def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass) model_path = args.model_path print(f"Loading checkpoints from [{model_path}]...") state_dict = torch.load(model_path, map_location='cpu') load_model_wo_clip(model, state_dict) model.mean = data.dataset.t2m_dataset.mean model.std = data.dataset.t2m_dataset.std model.to(device) model.eval() # disable random masking model = wrap_model(model, args) return model, diffusion def load_model(args, data, device, ModelClass=MDM): model, diffusion = create_model_and_diffusion(args, data, ModelClass=ModelClass) model_path = args.model_path print(f"Loading checkpoints from [{model_path}]...") state_dict = torch.load(model_path, map_location='cpu') load_model_wo_clip(model, state_dict) model.to(device) model.eval() # disable random masking model = wrap_model(model, args) return model, diffusion def load_model_wo_clip(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if 't_pos_encoder.pe' in missing_keys: missing_keys.remove('t_pos_encoder.pe') if 't_pos_encoder.pe' in unexpected_keys: unexpected_keys.remove('t_pos_encoder.pe') assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) def load_pretrained_mdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def load_pretrained_mdm_to_controlmdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) transformer_encoder_weight = {} for key, value in state_dict.items(): if key.startswith('seqTransEncoder'): transformer_encoder_weight[key[16:]] = value unexpected_keys.remove(key) model.seqTransEncoder_mdm.load_state_dict(transformer_encoder_weight, strict=True) model.seqTransEncoder_control.load_state_dict(transformer_encoder_weight, strict=True) assert len(unexpected_keys) == 0 #assert all([k.startswith('clip_model.') for k in missing_keys]) print("The following parameters are trained from scratch.") for k in missing_keys: if not k.startswith('clip_model.') and not k.startswith('seqTransEncoder'): print(k) def load_split_mdm(model, state_dict, cutting_point): new_state_dict = {} orig_trans_prefix = 'seqTransEncoder.' for k, v in state_dict.items(): if k.startswith(orig_trans_prefix): orig_layer = int(k.split('.')[2]) orig_suffix = '.'.join(k.split('.')[3:]) target_split = 'seqTransEncoder_start.' if orig_layer < cutting_point else 'seqTransEncoder_end.' target_layer = orig_layer if orig_layer < cutting_point else orig_layer - cutting_point new_k = target_split + 'layers.' + str(target_layer) + '.' 
+ orig_suffix new_state_dict[new_k] = v else: new_state_dict[k] = v missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def create_model_and_diffusion(args, data, ModelClass=MDM, DiffusionClass=SpacedDiffusion): model = ModelClass(**get_model_args(args, data)) diffusion = create_gaussian_diffusion(args, DiffusionClass) return model, diffusion def get_model_args(args, data): # default args clip_version = 'ViT-B/32' action_emb = 'tensor' cond_mode = 'text' if args.dataset in ['humanml', 'kit','babel', 'pw3d'] else 'action' if hasattr(data.dataset, 'num_actions'): num_actions = data.dataset.num_actions else: num_actions = 1 # SMPL defaults data_rep = 'rot6d' njoints = 25 nfeats = 6 if args.dataset in ['humanml', 'pw3d']: data_rep = 'hml_vec' njoints = 263 nfeats = 1 elif args.dataset == 'babel': data_rep = 'rot6d' njoints = 135 nfeats = 1 elif args.dataset == 'kit': data_rep = 'hml_vec' njoints = 251 nfeats = 1 else: raise TypeError(f'dataset {args.dataset} is not currently supported') return {'modeltype': '', 'njoints': njoints, 'nfeats': nfeats, 'num_actions': num_actions, 'translation': True, 'pose_rep': 'rot6d', 'glob': True, 'glob_rot': True, 'latent_dim': args.latent_dim, 'ff_size': 1024, 'num_layers': args.layers, 'num_heads': 4, 'dropout': 0.1, 'activation': "gelu", 'data_rep': data_rep, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'action_emb': action_emb, 'arch': args.arch, 'emb_trans_dec': args.emb_trans_dec, 'clip_version': clip_version, 'dataset': args.dataset, 'diffusion-steps': args.diffusion_steps, 'batch_size': args.batch_size, 'use_tta': args.use_tta, 'trans_emb': args.trans_emb, 'concat_trans_emb': args.concat_trans_emb, 'args': args} def create_gaussian_diffusion(args, DiffusionClass=SpacedDiffusion): # default params predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! steps = args.diffusion_steps scale_beta = 1. # no scaling timestep_respacing = '' # can be used for ddim sampling, we don't use it. learn_sigma = False rescale_timesteps = False print(f"number of diffusion-steps: {steps}")
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
3
2023-11-27 05:28:02+00:00
24k
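load_split_mdm in the record above splits a monolithic "seqTransEncoder.layers.<i>..." checkpoint into start/end sub-encoders by renumbering the layer indices at cutting_point. A self-contained sketch of that key rewrite; plain dicts stand in for tensor state dicts, and split_encoder_keys is an illustrative name.

def split_encoder_keys(state_dict, cutting_point, prefix="seqTransEncoder."):
    """Route layers < cutting_point to the start encoder, the rest (renumbered
    from zero) to the end encoder; leave all other keys untouched."""
    out = {}
    for k, v in state_dict.items():
        if not k.startswith(prefix):
            out[k] = v
            continue
        layer = int(k.split(".")[2])            # ".layers.<i>." index
        suffix = ".".join(k.split(".")[3:])     # e.g. "linear1.weight"
        if layer < cutting_point:
            out[f"seqTransEncoder_start.layers.{layer}.{suffix}"] = v
        else:
            out[f"seqTransEncoder_end.layers.{layer - cutting_point}.{suffix}"] = v
    return out

if __name__ == "__main__":
    sd = {"seqTransEncoder.layers.1.linear1.weight": "a",
          "seqTransEncoder.layers.3.linear1.weight": "b",
          "embed.weight": "c"}
    out = split_encoder_keys(sd, cutting_point=2)
    assert out["seqTransEncoder_start.layers.1.linear1.weight"] == "a"
    assert out["seqTransEncoder_end.layers.1.linear1.weight"] == "b"
    assert out["embed.weight"] == "c"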
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,344
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into a noise map with deterministic DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image, text_embeddings.dtype) # images2latents expects a target dtype print("latents shape: ", latents.shape) # iterative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the next (noisier) sample x_t -> x_{t+1} latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate latents during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) 
_mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
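# A concrete instance of that weighting, sketched here for reference (the
# standard classifier-free-guidance combination applied in the denoising loop):
#   noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# With guidance_scale == 1 this collapses to the plain conditional prediction.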
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
context_scheduler = get_context_scheduler(context_schedule)
3
2023-12-04 20:47:34+00:00
24k
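The `next_step` method in this record is the core of the DDIM inversion. Written out as math (a restatement of the code, with \(\bar\alpha_t\) standing for `scheduler.alphas_cumprod[t]` and \(\epsilon_\theta\) for the U-Net noise prediction):

\hat{x}_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\epsilon_\theta(x_t, t)}{\sqrt{\bar\alpha_t}}, \qquad x_{t+1} = \sqrt{\bar\alpha_{t+1}}\,\hat{x}_0 + \sqrt{1-\bar\alpha_{t+1}}\,\epsilon_\theta(x_t, t)

That is the standard DDIM update with \(\eta = 0\) run in reverse, which is what makes the inversion deterministic.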
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-i...
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14,808
If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. """ if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. 
""" if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https:
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
6
2023-11-27 07:01:39+00:00
24k
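A quick sketch of what `_default_key_normalizer` in this record buys you, using only the public API shown above: scheme and host are lowercased before the `PoolKey` is built, so differently-cased URLs share one cached pool (`example.com` is a placeholder host).

import urllib3

pm = urllib3.PoolManager(num_pools=2)
pool_a = pm.connection_from_url("http://example.com/a")
pool_b = pm.connection_from_url("HTTP://EXAMPLE.COM/b")
assert pool_a is pool_b  # same normalized PoolKey -> same ConnectionPool instance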
NobiDeveloper/Nobita-Filter-Bot
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, ...
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
14,685
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous"
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous"
await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j))
12
2023-11-28 13:36:56+00:00
24k
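The `save_group` handler in this record hinges on `temp.ME in r_j_check`, i.e. detecting that the bot itself is among the new chat members. A minimal self-contained sketch of that pattern (the session name and reply text are made up, and `get_me()` stands in for the repo's cached `temp.ME`):

from pyrogram import Client, filters

app = Client("my_session")  # hypothetical session name

@app.on_message(filters.new_chat_members & filters.group)
async def on_new_members(bot, message):
    me = await bot.get_me()
    if me.id in [u.id for u in message.new_chat_members]:
        # the bot itself was just added to this group
        await message.reply_text("Thanks for adding me!")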
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\...
import torch import os import numpy as np import math import shutil import safetensors.torch from ldm.modules.diffusionmodules.util import timestep_embedding from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.diffusionmodules.openaimodel import UNetModel from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder from torch.utils.data import DataLoader from PIL import Image from torch.optim.lr_scheduler import LambdaLR from omegaconf import OmegaConf
20,413
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, **kwargs): hs = []
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, **kwargs): hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
0
2023-11-30 13:50:58+00:00
24k
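`ControlledUnetModel.forward` in this record immediately calls `timestep_embedding` (the ground-truth next line). The context snippet only quotes its docstring; the widely used sinusoidal implementation it describes looks roughly like this (a sketch of the standard ldm/guided-diffusion version, `repeat_only=False` path):

import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    # Half the channels get cosines, half sines, over a geometric
    # frequency ladder running from 1 down to 1/max_period.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:  # zero-pad odd embedding widths
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb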
IanYeung/MGLD-VSR
basicsr/data/realbasicvsr_dataset.py
[ { "identifier": "Clip", "path": "basicsr/data/mmcv_transforms/aug_pix.py", "snippet": "class Clip(BaseTransform):\n \"\"\"Clip the pixels.\n\n Modified keys are the attributes specified in \"keys\".\n\n Args:\n keys (list[str]): The keys whose values are clipped.\n a_min (int): Lo...
import cv2 import math import time import os import os.path as osp import numpy as np import random import torch from copy import deepcopy from pathlib import Path from torch.utils import data as data from basicsr.data.mmcv_transforms import Clip, UnsharpMasking, RescaleToZeroOne from basicsr.data.mmcv_transforms import RandomBlur, RandomResize, RandomNoise, RandomJPEGCompression, RandomVideoCompression, DegradationsWithShuffle from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels from basicsr.data.transforms import augment, single_random_crop, paired_random_crop from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor, tensor2img, imwrite from basicsr.utils.flow_util import dequantize_flow from basicsr.utils.registry import DATASET_REGISTRY
15,788
folder, frame_num, _ = line.split(' ') self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))]) # remove the video clips used in validation if opt['val_partition'] == 'REDS4': val_partition = ['000', '011', '015', '020'] elif opt['val_partition'] == 'official': val_partition = [f'{v:03d}' for v in range(240, 270)] else: raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' f"Supported ones are ['official', 'REDS4'].") if opt['test_mode']: self.keys = [v for v in self.keys if v.split('/')[0] in val_partition] else: self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.is_lmdb = False if self.io_backend_opt['type'] == 'lmdb': self.is_lmdb = True self.io_backend_opt['db_paths'] = [self.gt_root] self.io_backend_opt['client_keys'] = ['gt'] # temporal augmentation configs self.interval_list = opt.get('interval_list', [1]) self.random_reverse = opt.get('random_reverse', False) interval_str = ','.join(str(x) for x in self.interval_list) logger = get_root_logger() logger.info(f'Temporal augmentation interval list: [{interval_str}]; ' f'random reverse is {self.random_reverse}.') # blur settings for the first degradation self.blur_kernel_size = opt['blur_kernel_size'] self.kernel_list = opt['kernel_list'] self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability self.blur_sigma = opt['blur_sigma'] self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels self.betap_range = opt['betap_range'] # betap used in plateau blur kernels self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters # blur settings for the second degradation self.blur_kernel_size2 = opt['blur_kernel_size2'] self.kernel_list2 = opt['kernel_list2'] self.kernel_prob2 = opt['kernel_prob2'] self.blur_sigma2 = opt['blur_sigma2'] self.betag_range2 = opt['betag_range2'] self.betap_range2 = opt['betap_range2'] self.sinc_prob2 = opt['sinc_prob2'] # a final sinc filter self.final_sinc_prob = opt['final_sinc_prob'] self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21 # TODO: kernel range is now hard-coded, should be in the configure file self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect self.pulse_tensor[10, 10] = 1 def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) gt_size = self.opt['gt_size'] key = self.keys[index] clip_name, frame_name = key.split('/') # key example: 000/00000000 # determine the neighboring frames interval = random.choice(self.interval_list) # ensure not exceeding the borders start_frame_idx = int(frame_name) if start_frame_idx > 100 - self.num_frame * interval: start_frame_idx = random.randint(0, 100 - self.num_frame * interval) end_frame_idx = start_frame_idx + self.num_frame * interval neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) # random reverse if self.random_reverse and random.random() < 0.5: neighbor_list.reverse() # get the GT frames img_gts = [] for neighbor in neighbor_list: if self.is_lmdb: img_gt_path = f'{clip_name}/{neighbor:08d}' else: img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png' # get GT img_bytes = self.file_client.get(img_gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=True) img_gts.append(img_gt) # randomly crop img_gts = single_random_crop(img_gts, gt_size, img_gt_path) # 
augmentation - flip, rotate img_gts = augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) # list-to-list img_gts = img2tensor(img_gts) # kernels kernel1s = [] kernel2s = [] sinc_kernels = [] for _ in range(len(img_gts)): # ------------------------ Generate kernels (used in the first degradation) ------------------------ # kernel_size = random.choice(self.kernel_range) if np.random.uniform() < self.opt['sinc_prob']: # this sinc filter setting is for kernels ranging from [7, 21] if kernel_size < 13: omega_c = np.random.uniform(np.pi / 3, np.pi) else: omega_c = np.random.uniform(np.pi / 5, np.pi) kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) else:
# @DATASET_REGISTRY.register() class RealVSRRecurrentDataset(data.Dataset): """REDS dataset for training recurrent networks. The keys are generated from a meta info txt file. basicsr/data/meta_info/meta_info_REDS_GT.txt Each line contains: 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space. Examples: 000 100 (720,1280,3) 001 100 (720,1280,3) ... Key examples: "000/00000000" GT (gt): Ground-Truth; LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. Args: opt (dict): Config for train dataset. It contains the following keys: dataroot_gt (str): Data root path for gt. meta_info_file (str): Path for meta information file. val_partition (str): Validation partition types. 'REDS4' or 'official'. io_backend (dict): IO backend type and other kwarg. num_frame (int): Window size for input frames. gt_size (int): Cropped patched size for gt patches. interval_list (list): Interval list for temporal augmentation. random_reverse (bool): Random reverse input frames. use_hflip (bool): Use horizontal flips. use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). """ def __init__(self, opt): super(RealVSRRecurrentDataset, self).__init__() self.opt = opt self.gt_root = Path(opt['dataroot_gt']) self.num_frame = opt['num_frame'] self.keys = [] with open(opt['meta_info_file'], 'r') as fin: for line in fin: folder, frame_num, _ = line.split(' ') self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))]) # remove the video clips used in validation if opt['val_partition'] == 'REDS4': val_partition = ['000', '011', '015', '020'] elif opt['val_partition'] == 'official': val_partition = [f'{v:03d}' for v in range(240, 270)] else: raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' 
f"Supported ones are ['official', 'REDS4'].") if opt['test_mode']: self.keys = [v for v in self.keys if v.split('/')[0] in val_partition] else: self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.is_lmdb = False if self.io_backend_opt['type'] == 'lmdb': self.is_lmdb = True self.io_backend_opt['db_paths'] = [self.gt_root] self.io_backend_opt['client_keys'] = ['gt'] # temporal augmentation configs self.interval_list = opt.get('interval_list', [1]) self.random_reverse = opt.get('random_reverse', False) interval_str = ','.join(str(x) for x in self.interval_list) logger = get_root_logger() logger.info(f'Temporal augmentation interval list: [{interval_str}]; ' f'random reverse is {self.random_reverse}.') # the first degradation self.random_blur_1 = RandomBlur( params=opt['degradation_1']['random_blur']['params'], keys=opt['degradation_1']['random_blur']['keys'] ) self.random_resize_1 = RandomResize( params=opt['degradation_1']['random_resize']['params'], keys=opt['degradation_1']['random_resize']['keys'] ) self.random_noise_1 = RandomNoise( params=opt['degradation_1']['random_noise']['params'], keys=opt['degradation_1']['random_noise']['keys'] ) self.random_jpeg_1 = RandomJPEGCompression( params=opt['degradation_1']['random_jpeg']['params'], keys=opt['degradation_1']['random_jpeg']['keys'] ) self.random_mpeg_1 = RandomVideoCompression( params=opt['degradation_1']['random_mpeg']['params'], keys=opt['degradation_1']['random_mpeg']['keys'] ) # the second degradation self.random_blur_2 = RandomBlur( params=opt['degradation_2']['random_blur']['params'], keys=opt['degradation_2']['random_blur']['keys'] ) self.random_resize_2 = RandomResize( params=opt['degradation_2']['random_resize']['params'], keys=opt['degradation_2']['random_resize']['keys'] ) self.random_noise_2 = RandomNoise( params=opt['degradation_2']['random_noise']['params'], keys=opt['degradation_2']['random_noise']['keys'] ) self.random_jpeg_2 = RandomJPEGCompression( params=opt['degradation_2']['random_jpeg']['params'], keys=opt['degradation_2']['random_jpeg']['keys'] ) self.random_mpeg_2 = RandomVideoCompression( params=opt['degradation_2']['random_mpeg']['params'], keys=opt['degradation_2']['random_mpeg']['keys'] ) # final self.resize_final = RandomResize( params=opt['degradation_2']['resize_final']['params'], keys=opt['degradation_2']['resize_final']['keys'] ) self.blur_final = RandomBlur( params=opt['degradation_2']['blur_final']['params'], keys=opt['degradation_2']['blur_final']['keys'] ) # transforms self.usm = UnsharpMasking( kernel_size=opt['transforms']['usm']['kernel_size'], sigma=opt['transforms']['usm']['sigma'], weight=opt['transforms']['usm']['weight'], threshold=opt['transforms']['usm']['threshold'], keys=opt['transforms']['usm']['keys'] ) self.clip = Clip(keys=opt['transforms']['clip']['keys']) self.rescale = RescaleToZeroOne(keys=opt['transforms']['rescale']['keys']) def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) gt_size = self.opt['gt_size'] key = self.keys[index] clip_name, frame_name = key.split('/') # key example: 000/00000000 # determine the neighboring frames interval = random.choice(self.interval_list) # ensure not exceeding the borders start_frame_idx = int(frame_name) if start_frame_idx > 100 - self.num_frame * interval: start_frame_idx = random.randint(0, 100 - self.num_frame * interval) 
end_frame_idx = start_frame_idx + self.num_frame * interval neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) # random reverse if self.random_reverse and random.random() < 0.5: neighbor_list.reverse() # get the GT frames img_gts = [] for neighbor in neighbor_list: if self.is_lmdb: img_gt_path = f'{clip_name}/{neighbor:08d}' else: img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png' # get GT img_bytes = self.file_client.get(img_gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=False) img_gts.append(img_gt) # randomly crop img_gts = single_random_crop(img_gts, gt_size, img_gt_path) # augmentation - flip, rotate img_gts = augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) img_lqs = deepcopy(img_gts) out_dict = {'lqs': img_lqs, 'gts': img_gts} out_dict = self.usm.transform(out_dict) ## the first degradation out_dict = self.random_blur_1(out_dict) out_dict = self.random_resize_1(out_dict) out_dict = self.random_noise_1(out_dict) out_dict = self.random_jpeg_1(out_dict) out_dict = self.random_mpeg_1(out_dict) ## the second degradation out_dict = self.random_blur_2(out_dict) out_dict = self.random_resize_2(out_dict) out_dict = self.random_noise_2(out_dict) out_dict = self.random_jpeg_2(out_dict) out_dict = self.random_mpeg_2(out_dict) ## final resize out_dict = self.resize_final(out_dict) out_dict = self.blur_final(out_dict) # post process out_dict = self.clip(out_dict) out_dict = self.rescale.transform(out_dict) # list-to-list for k in out_dict.keys(): out_dict[k] = img2tensor(out_dict[k]) # img_gts: (t, c, h, w) # key: str return out_dict def __len__(self): return len(self.keys) # @DATASET_REGISTRY.register() class RealESRGANRecurrentDataset(data.Dataset): """REDS dataset for training recurrent networks. The keys are generated from a meta info txt file. basicsr/data/meta_info/meta_info_REDS_GT.txt Each line contains: 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space. Examples: 000 100 (720,1280,3) 001 100 (720,1280,3) ... Key examples: "000/00000000" GT (gt): Ground-Truth; LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. Args: opt (dict): Config for train dataset. It contains the following keys: dataroot_gt (str): Data root path for gt. meta_info_file (str): Path for meta information file. val_partition (str): Validation partition types. 'REDS4' or 'official'. io_backend (dict): IO backend type and other kwarg. num_frame (int): Window size for input frames. gt_size (int): Cropped patched size for gt patches. interval_list (list): Interval list for temporal augmentation. random_reverse (bool): Random reverse input frames. use_hflip (bool): Use horizontal flips. use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). """ def __init__(self, opt): super(RealESRGANRecurrentDataset, self).__init__() self.opt = opt self.gt_root = Path(opt['dataroot_gt']) self.num_frame = opt['num_frame'] self.keys = [] with open(opt['meta_info_file'], 'r') as fin: for line in fin: folder, frame_num, _ = line.split(' ') self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))]) # remove the video clips used in validation if opt['val_partition'] == 'REDS4': val_partition = ['000', '011', '015', '020'] elif opt['val_partition'] == 'official': val_partition = [f'{v:03d}' for v in range(240, 270)] else: raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' 
f"Supported ones are ['official', 'REDS4'].") if opt['test_mode']: self.keys = [v for v in self.keys if v.split('/')[0] in val_partition] else: self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.is_lmdb = False if self.io_backend_opt['type'] == 'lmdb': self.is_lmdb = True self.io_backend_opt['db_paths'] = [self.gt_root] self.io_backend_opt['client_keys'] = ['gt'] # temporal augmentation configs self.interval_list = opt.get('interval_list', [1]) self.random_reverse = opt.get('random_reverse', False) interval_str = ','.join(str(x) for x in self.interval_list) logger = get_root_logger() logger.info(f'Temporal augmentation interval list: [{interval_str}]; ' f'random reverse is {self.random_reverse}.') # blur settings for the first degradation self.blur_kernel_size = opt['blur_kernel_size'] self.kernel_list = opt['kernel_list'] self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability self.blur_sigma = opt['blur_sigma'] self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels self.betap_range = opt['betap_range'] # betap used in plateau blur kernels self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters # blur settings for the second degradation self.blur_kernel_size2 = opt['blur_kernel_size2'] self.kernel_list2 = opt['kernel_list2'] self.kernel_prob2 = opt['kernel_prob2'] self.blur_sigma2 = opt['blur_sigma2'] self.betag_range2 = opt['betag_range2'] self.betap_range2 = opt['betap_range2'] self.sinc_prob2 = opt['sinc_prob2'] # a final sinc filter self.final_sinc_prob = opt['final_sinc_prob'] self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21 # TODO: kernel range is now hard-coded, should be in the configure file self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect self.pulse_tensor[10, 10] = 1 def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) gt_size = self.opt['gt_size'] key = self.keys[index] clip_name, frame_name = key.split('/') # key example: 000/00000000 # determine the neighboring frames interval = random.choice(self.interval_list) # ensure not exceeding the borders start_frame_idx = int(frame_name) if start_frame_idx > 100 - self.num_frame * interval: start_frame_idx = random.randint(0, 100 - self.num_frame * interval) end_frame_idx = start_frame_idx + self.num_frame * interval neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) # random reverse if self.random_reverse and random.random() < 0.5: neighbor_list.reverse() # get the GT frames img_gts = [] for neighbor in neighbor_list: if self.is_lmdb: img_gt_path = f'{clip_name}/{neighbor:08d}' else: img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png' # get GT img_bytes = self.file_client.get(img_gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=True) img_gts.append(img_gt) # randomly crop img_gts = single_random_crop(img_gts, gt_size, img_gt_path) # augmentation - flip, rotate img_gts = augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) # list-to-list img_gts = img2tensor(img_gts) # kernels kernel1s = [] kernel2s = [] sinc_kernels = [] for _ in range(len(img_gts)): # ------------------------ Generate kernels (used in the first degradation) ------------------------ # kernel_size = random.choice(self.kernel_range) if np.random.uniform() < 
self.opt['sinc_prob']: # this sinc filter setting is for kernels ranging from [7, 21] if kernel_size < 13: omega_c = np.random.uniform(np.pi / 3, np.pi) else: omega_c = np.random.uniform(np.pi / 5, np.pi) kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) else:
kernel = random_mixed_kernels(
10
2023-11-30 01:50:29+00:00
24k
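Both dataset classes in this record draw `kernel_size` from `kernel_range` (7 to 21) yet keep a fixed 21x21 `pulse_tensor`, which implies kernels are center-padded to a common size before batching. That padding step falls outside the snippet; a sketch of the usual convention (an assumption, not the repo's exact code):

import numpy as np

def pad_kernel(kernel, pad_to=21):
    # Zero-pad a (k, k) kernel symmetrically to (pad_to, pad_to) so kernels of
    # different sizes can be stacked into a single batch tensor.
    pad = (pad_to - kernel.shape[0]) // 2
    return np.pad(kernel, ((pad, pad), (pad, pad)))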
Czm369/MixPL
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of t...
from mmengine.config import read_base from .rtmdet_ins_l_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmengine.hooks.ema_hook import EMAHook from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug) from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook from mmdet.models.layers.ema import ExpMomentumEMA
18,177
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict(
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict(
type=RandomCrop,
6
2023-11-30 08:58:00+00:00
24k
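This record's gold line continues a pure-Python mmengine config: read_base() pulls every top-level name of the parent config into the child module, after which dicts such as model can be mutated in place. A schematic of the pattern; the scaling factors below are illustrative placeholders, not values from the record:

from mmengine.config import read_base

with read_base():
    from .rtmdet_ins_l_8xb32_300e_coco import *  # base names become module globals

# `model` now exists here as a plain dict and can be shrunk for a smaller
# variant, just as the record does for the "s" model (factors made up here).
model.update(dict(backbone=dict(deepen_factor=0.167, widen_factor=0.375)))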
SEU-ProactiveSecurity-Group/MalPurifier
core/defense/amd_dnn_plus.py
[ { "identifier": "Max", "path": "core/attack/max.py", "snippet": "class Max(BaseAttack):\n \"\"\"\n Max攻击:迭代地从多个攻击方法中选择结果。\n\n 参数\n --------\n @param attack_list: List, 已实例化的攻击对象的列表。\n @param varepsilon: Float, 用于判断收敛性的标量。\n \"\"\"\n\n def __init__(self, attack_list, varepsilon=1e...
import time
import os.path as path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from core.attack.max import Max
from core.attack.stepwise_max import StepwiseMax
from core.defense.md_dnn import MalwareDetectionDNN
from core.defense.amd_template import DetectorTemplate
from config import config, logging, ErrorHandler
from tools import utils
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score
19,741
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
logger = logging.getLogger('core.defense.amd_dnn_plus')
4
2023-11-27 02:00:23+00:00
24k
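The docstring in this record cites Grosse et al. (2017), whose detector trains the classifier with one extra output class reserved for adversarial inputs. A minimal PyTorch sketch of that cited idea; the class name, layer sizes, and structure are illustrative and not taken from MalwareDetectionDNN:

import torch
import torch.nn as nn

class DetectorAugmentedDNN(nn.Module):
    # One extra logit (index n_classes) stands for "adversarial", per Grosse et al. 2017.
    def __init__(self, n_features, n_classes, hidden=200):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_features, hidden), nn.ReLU(),
            nn.Linear(hidden, n_classes + 1))

    def forward(self, x):
        logits = self.net(x)
        is_adv = logits.argmax(dim=1) == logits.shape[1] - 1  # flagged as adversarial
        return logits, is_adv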
iann838/pulsefire
tests/test_taskgroups.py
[ { "identifier": "RiotAPIClient", "path": "pulsefire/clients.py", "snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ ...
import asyncio
import os

from pulsefire.clients import RiotAPIClient
from pulsefire.functools import async_to_sync
from pulsefire.schemas import RiotAPISchema
from pulsefire.taskgroups import TaskGroup
16,605
@async_to_sync()
async def test_taskgroup():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV")
        summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])
@async_to_sync()
async def test_taskgroup():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV")
        summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])
async with TaskGroup() as tg:
3
2023-11-27 13:37:24+00:00
24k
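The gold continuation opens a pulsefire TaskGroup to fan the match-id list out concurrently. A plausible completion of the test body, assuming TaskGroup keeps the semantics pulsefire documents (an awaitable create_task, optionally bounded by a semaphore, and results() once the block exits):

        async with TaskGroup(asyncio.Semaphore(100)) as tg:
            for match_id in match_ids[:20]:
                await tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id))
        matches: list[RiotAPISchema.LolMatchV5Match] = tg.results()  # one entry per completed task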
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_tess_stochastic.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Opti...
from TestCaseDistributionSystems.test_cases import case33
from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid
from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION
from numpy import zeros, shape, ones, diag, concatenate, eye
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, lil_matrix
from numpy import flatnonzero as find
from numpy import array, tile, arange, random
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A
from pypower.idx_bus import PD, VMAX, VMIN, QD
from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp
from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp
from copy import deepcopy
from TestCaseDistributionSystems.data_format.idx_MG import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \
    PMESS, EESS, NX_MG, QBIC, QUG, QG
from TestCaseDistributionSystems.database_management import DataBaseManagement
from StochasticOptimization.scenario_reduction import ScenarioReduction
15,480
for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, 
nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC),
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: zhaoty@ntu.edu.sg @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Stochastic optimal power flow with tess" def main(self, power_networks, micro_grids, profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = nineq_index[i] + 
model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng, nmes=self.nmes) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formualtion(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary for DGs within distribution networks rg_l = zeros(ng) 
alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T * ng, nv_first_stage)) beq = zeros(T * ng) for i in 
range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, ng * 5 + nmg * 2 + t] = 1 A_temp[t * nmg + j, ng * 5 + nmg * 2 + nmg + t] = -1 A_temp[t * nmg + j, ng * 5 + nmg * 2 + nmg * 2 + t] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[i * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[i * nmg + j, (i - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int) 
nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) 
else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = 2 * gen[:, PMAX] / baseMVA qg_u = 2 * gen[:, QMAX] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u]), T)]) vtypes = ["c"] * _nv_second_stage 
* T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg))]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - 
nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC),
int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC),
5
2023-11-27 15:57:53+00:00
24k
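Most of this record is block assembly for a two-stage stochastic program: each scenario's equality constraints land on their own diagonal block, first-stage equalities sit in the top-left corner, and the coupling rows (Ts/Ws) additionally reference the first-stage columns. A toy version of the diagonal stacking with the same nv_index/neq_index bookkeeping; the dict layout is an assumption for illustration, not the repo's API:

from numpy import concatenate
from scipy.sparse import lil_matrix

def merge_two_stage(first, scenarios):
    # `first` and each scenario: {"Aeq": sparse matrix, "beq": vector, "c": vector}
    nv = [first["Aeq"].shape[1]]
    ne = [first["Aeq"].shape[0]]
    for s in scenarios:
        nv.append(nv[-1] + s["Aeq"].shape[1])
        ne.append(ne[-1] + s["Aeq"].shape[0])
    Aeq = lil_matrix((ne[-1], nv[-1]))
    Aeq[:ne[0], :nv[0]] = first["Aeq"]
    beq, c = first["beq"], first["c"]
    for i, s in enumerate(scenarios):
        # scenario i owns columns nv[i]:nv[i+1] and rows ne[i]:ne[i+1]
        Aeq[ne[i]:ne[i + 1], nv[i]:nv[i + 1]] = s["Aeq"]
        beq = concatenate([beq, s["beq"]])
        c = concatenate([c, s["c"]])
    return Aeq, beq, c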
girgle/DouZero_For_New_HLDDZ
GOOD.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"Unity...
import GameHelper as gh
import os
import sys
import time
import threading
import pyautogui
import win32gui
import multiprocessing as mp
import DetermineColor as DC
import cv2
import numpy as np
import traceback
import BidModel
import LandlordModel
import FarmerModel
from GameHelper import GameHelper
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from collections import defaultdict
from douzero.env.move_detector import get_move_type
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import QTime, QEventLoop, Qt
from MainWindow import Ui_Form
from douzero.env.game import GameEnv
from douzero.evaluation.deep_agent import DeepAgent
15,236
    def init_display(self):
        self.WinRate.setText("评分")
        self.label.setText("游戏状态")
        self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);')
        self.UserHandCards.setText("手牌")
        # self.LBrowser.clear()
        # self.RBrowser.clear()
        self.LPlayedCard.setText("上家出牌区域")
        self.RPlayedCard.setText("下家出牌区域")
        self.PredictedCard.setText("AI出牌区域")
        self.ThreeLandlordCards.setText("地主牌")
        self.recorder2zero()
        for player in self.Players:
            player.setStyleSheet('background-color: rgba(0, 255, 0, 0);')

    def init_cards(self):
        self.RunGame = True
        GameHelper.Interrupt = False
        self.user_hand_cards_real = ""
        self.user_hand_cards_env = []
        # cards played by the other players
        self.other_played_cards_real = ""
        self.other_played_cards_env = []
        # other players' hands (full deck minus my hand; played cards are subtracted later)
        self.other_hand_cards = []
        # the three landlord (bottom) cards
        self.three_landlord_cards_real = ""
        self.three_landlord_cards_env = []
        # player role code: 0 = before the landlord, 1 = landlord, 2 = after the landlord
        self.user_position_code = None
        self.user_position = ""
        # the three players' hands at the start of the game
        self.card_play_data_list = {}
        # recognize my hand
        self.user_hand_cards_real = self.find_my_cards()
        while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20:
            self.detect_start_btn()
            if not self.RunGame:
                break
            self.sleep(200)
            self.user_hand_cards_real = self.find_my_cards()
        self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)]
        # recognize the three landlord cards
        self.three_landlord_cards_real = self.find_landlord_cards()
        self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real)
        self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)]
        while len(self.three_landlord_cards_env) != 3:
            self.detect_start_btn()
            if not self.RunGame:
                break
            if len(self.three_landlord_cards_env) > 3:
                self.ThreeLandlordCardsConfidence += 0.05
            elif len(self.three_landlord_cards_env) < 3:
                self.ThreeLandlordCardsConfidence -= 0.05
            self.three_landlord_cards_real = self.find_landlord_cards()
            self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real)
            self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)]
        # recognize my role
        self.sleep(500)
        self.user_position_code = self.find_landlord(self.LandlordFlagPos)
        self.sleep(200)
        while self.user_position_code is None:
            self.detect_start_btn()
            if not self.RunGame:
                break
            self.user_position_code = self.find_landlord(self.LandlordFlagPos)
            self.sleep(200)
        print("正在出牌人的代码: ", self.user_position_code)
        if self.user_position_code is None:
            items = ("地主上家", "地主", "地主下家")
            item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False)
            if okPressed and item:
                self.user_position_code = items.index(item)
            else:
                return
        self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code]
        print("我现在在地主的方向:", self.user_position)
        for player in self.Players:
            player.setStyleSheet('background-color: rgba(0, 255, 0, 0);')
        self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);')
        # full deck minus my hand gives the other players' cards; how they are split
        # between the two opponents does not affect the AI's judgment
        for i in set(AllEnvCard):
            self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i)))
        self.other_hands_cards_str = str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1]
        self.cards_recorder(self.other_hands_cards_str)
        self.card_play_data_list.update({
            'three_landlord_cards': self.three_landlord_cards_env,
            ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]:
                self.user_hand_cards_env,
            ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]:
                self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:],
            ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]:
                self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:]
        })
        print("开始对局")
        print("手牌:", self.user_hand_cards_real)
        print("地主牌:", self.three_landlord_cards_real)
        # hand generation done; validate the card counts
        if len(self.card_play_data_list["three_landlord_cards"]) != 3:
            QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes)
            self.init_display()
            return
        if len(self.card_play_data_list["landlord_up"]) != 17 or \
                len(self.card_play_data_list["landlord_down"]) != 17 or \
                len(self.card_play_data_list["landlord"]) != 20:
            QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes)
            self.init_display()
            return
        # play order: 0 = I lead, 1 = the seat after me leads, 2 = the seat before me leads
        self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2
        # create an AI that acts for the player
        ai_players = [0, 0]
        ai_players[0] = self.user_position
        ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮 QtCore.Qt.WindowStaysOnTopHint | # 窗体总在最前端 QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # 固定窗体大小 self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # 参数 self.MyConfidence = 0.8 # 我的牌的置信度 self.OtherConfidence = 0.8 # 别人的牌的置信度 self.WhiteConfidence = 0.85 # 检测白块的置信度 self.LandlordFlagConfidence = 0.8 # 检测地主标志的置信度 self.ThreeLandlordCardsConfidence = 0.8 # 检测地主底牌的置信度 self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # 等待状态稳定延时 self.MyFilter = 40 # 我的牌检测结果过滤参数 self.OtherFilter = 25 # 别人的牌检测结果过滤参数 self.SleepTime = 0.1 # 循环中睡眠时间 self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # 叫地主阈值 self.BidThreshold2 = 72 # 抢地主阈值 self.JiabeiThreshold = ( (85, 72), # 叫地主 超级加倍 加倍 阈值 (85, 75) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下) ) self.MingpaiThreshold = 92 # 坐标 self.MyHandCardsPos = (180, 560, 1050, 90) # 我的截图区域 self.LPlayedCardsPos = (320, 280, 500, 120) # 左边出牌截图区域 self.RPlayedCardsPos = (600, 280, 500, 120) # 右边出牌截图区域 self.LandlordCardsPos = (600, 33, 220, 103) # 地主底牌截图区域 self.LPassPos = (360, 360, 120, 80) # 左边不出截图区域 self.RPassPos = (940, 360, 120, 80) # 右边不出截图区域 self.PassBtnPos = (200, 450, 1000, 120) # 要不起截图区域 self.GeneralBtnPos = (200, 450, 1000, 120) # 叫地主、抢地主、加倍按钮截图区域 self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # 地主标志截图区域(右-我-左) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self): 
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str 
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
self.env = GameEnv(ai_players)
3
2023-12-01 04:04:30+00:00
24k
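The role-assignment arithmetic buried in the flattened init_cards above is the crux of this record, so here is a minimal, self-contained sketch of the same idea (ROLES and build_card_play_data are illustrative names, not the bot's actual module): the full deck minus the recognized hand yields the two hidden hands, and the dict keys are derived from the player's position code with modular arithmetic.

# Minimal sketch of the hand-assignment idea from init_cards above.
ROLES = ['landlord_up', 'landlord', 'landlord_down']

def build_card_play_data(position_code, my_hand, full_deck, bottom_cards):
    # Every card not in my hand belongs to one of the two hidden players.
    others = []
    for card in set(full_deck):
        others.extend([card] * (full_deck.count(card) - my_hand.count(card)))
    data = {'three_landlord_cards': bottom_cards, ROLES[position_code]: my_hand}
    nxt, prev = (position_code + 1) % 3, (position_code + 2) % 3
    # The landlord holds 20 cards (17 + 3 bottom cards), the others 17 each,
    # so whichever hidden role is the landlord gets the longer slice.
    data[ROLES[nxt]] = others[:17] if nxt != 1 else others[17:]
    data[ROLES[prev]] = others[:17] if nxt == 1 else others[17:]
    return data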
super1207/satoricq
satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://ww...
import asyncio import aiohttp import json import uuid from kook_adapter import AdapterKook from mihoyo_adapter import AdapterMihoyo from onebot_adapter import AdapterOnebot from config import Config from aiohttp import web from qq_adapter import AdapterQQ from tool import remove_json_null
17,914
return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''Handle admin API calls here''' # Authentication if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''Handle any other API calls here''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''Handle the websocket here''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise Exception("token err") self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) self._evt_id += 1 # Read the config file await self._config.read_config() # Create the adapters for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook": adapter = AdapterKook(botcfg) elif botcfg["platform"] == "mihoyo": adapter = AdapterMihoyo(botcfg) elif botcfg["platform"] == "qq":
class Satori: def __init__(self) -> None: self._config:Config = Config() self.adapterlist = [] self.wsmap = {} self._evt_id = 100 async def _get_adapter(self,platform,self_id): ''' Get the adapter that matches platform and self_id ''' for adapter in self.adapterlist: info = adapter["info"] for bot in info: if self_id == bot["self_id"] and bot["platform"] == platform: return adapter["adapter"] return None async def ws_send_json(ws,js) -> None: js = remove_json_null(js) print("--------ws_send_json",json.dumps(js)) await ws.send_json(js) async def _handle_http_normal(self,request:web.Request): print("----http normal",request) '''Handle normal API calls here''' # Authentication if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path platform = request.headers.get("X-Platform") self_id = request.headers.get("X-Self-ID") adapter:AdapterOnebot = await self._get_adapter(platform,self_id) if adapter == None: return web.Response(text="bot not found") if method == "/v1/login.get": ret = await adapter.get_login(platform,self_id) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''Handle admin API calls here''' # Authentication if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''Handle any other API calls here''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''Handle the websocket here''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise Exception("token err") self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) self._evt_id += 1 # Read the config file await self._config.read_config() # Create the adapters for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook": adapter = AdapterKook(botcfg) elif botcfg["platform"] == "mihoyo": adapter = AdapterMihoyo(botcfg) elif botcfg["platform"] == "qq":
adapter = AdapterQQ(botcfg)
4
2023-12-03 13:53:47+00:00
24k
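The websocket handler in this record implements the Satori op-code handshake used above (3 = IDENTIFY, 4 = READY with the login list, 1 = PING, 2 = PONG, 0 = EVENT pushes). A stripped-down sketch of that loop, using only standard aiohttp calls, might look like this (handler and variable names are illustrative):

import json
from aiohttp import web, WSMsgType

async def events_ws(request: web.Request) -> web.WebSocketResponse:
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    authed = False  # the real gateway only pushes op 0 events once this is set
    async for msg in ws:
        if msg.type != WSMsgType.TEXT:
            continue
        frame = json.loads(msg.data)
        if frame["op"] == 3:    # IDENTIFY: validate the token, answer with READY
            authed = True
            await ws.send_json({"op": 4, "body": {"logins": []}})
        elif frame["op"] == 1:  # PING: answer with PONG to keep the link alive
            await ws.send_json({"op": 2})
    return ws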
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_di...
import copy import distutils.dir_util import json import logging import os.path import posixpath import shlex import shutil import tempfile import textwrap import time import requests from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from addict import Dict as AttrDict from oss2 import ObjectIterator from .common import git_utils from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat from .common.docker_utils import ContainerRun, run_container from .common.oss_utils import OssUriObj, download, is_oss_uri, upload from .common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) from .exception import DuplicatedMountException, MountPathIsOccupiedException from .image import ImageInfo from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType from .serializers import SerializerBase from .session import Session, get_default_session from .estimator import AlgorithmEstimator
17,509
# if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." ) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. 
requirements (List[str], optional): A list of Python package dependencies; they will be installed before the serving container runs. requirements_path (str, optional): An absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it is not set, a TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. """ session = session or get_default_session() if git_config:
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameters only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is used to describe how the model is served in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to configure an InferenceSpec:: >>> # build an inference_spec that uses the XGBoost processor.
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. 
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str, session: Session = None, ) -> Dict[str, Any]: """Mount a source storage to the running container. .. note:: If source is a local path, it will be uploaded to the OSS bucket and mounted. If source is a OSS path, it will be mounted directly. Args: source (str): The source storage to be attached, currently only support OSS path in OSS URI format and local path. mount_path (str): The mount path in the container. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: Dict[str, Any]: The storage config. Raises: DuplicateMountException: If the mount path is already used or source OSS path is mounted to the container. Examples:: # Mount a OSS storage path to the running container. >>> inference_spec.mount("oss://<YourOssBucket>/path/to/directory/model.json", ... "/ml/model/") # 'Mount' a local path to the running container. >>> inference_spec.mount("/path/to/your/data/", "/ml/model/") """ session = session or get_default_session() # TODO: supports more storages, such as NAS, PAI Dataset, PAI CodeSource, etc. if not isinstance(source, str): raise ValueError( "Parameter should be a string which represents an OSS storage path" " or a local file path." ) if "storage" in self._cfg_dict: configs = self._cfg_dict.get("storage", []) else: configs = [] uris = set() for conf in configs: # check if target mount path is already used. if conf.get("mount_path") == mount_path: raise MountPathIsOccupiedException( f"The mount path '{mount_path}' has already been used." ) mount_uri = conf.get("oss", {}).get("path") uris.add(mount_uri) if is_oss_uri(source): oss_uri_obj = OssUriObj(source) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } elif os.path.exists(source): # if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." 
) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. requirements (List[str], optional): A list of Python package dependencies; they will be installed before the serving container runs. requirements_path (str, optional): An absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it is not set, a TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. """ session = session or get_default_session() if git_config:
updated_args = git_utils.git_clone_repo(
0
2023-12-01 01:40:12+00:00
24k
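InferenceSpec.add_option in this record turns a dotted JSON path such as "metadata.rpc.keepalive" into nested dictionaries by folding the path right to left. A minimal sketch of just that trick follows (using plain dicts instead of the AttrDict merge the class actually performs, so sibling keys would be clobbered here):

def set_by_path(cfg: dict, name: str, value) -> dict:
    # Fold "a.b.c" right-to-left: value -> {"c": value} -> {"b": {"c": value}} -> ...
    src = value
    for key in reversed(name.split(".")):
        src = {key: src}
    cfg.update(src)  # the real class deep-merges AttrDicts instead
    return cfg

print(set_by_path({}, "metadata.rpc.keepalive", 10000))
# -> {'metadata': {'rpc': {'keepalive': 10000}}}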
mpenning/ciscoconfparse2
tests/conftest.py
[ { "identifier": "CiscoConfParse", "path": "ciscoconfparse2/ciscoconfparse2.py", "snippet": "class CiscoConfParse(object):\n \"\"\"Parse Cisco IOS configurations and answer queries about the configs.\"\"\"\n config: Optional[Union[str,List[str]]] = None\n syntax: str = \"ios\"\n encoding: str...
import platform import sys import os import dns.exception import dns.resolver import pytest from ciscoconfparse2.ciscoconfparse2 import CiscoConfParse from ciscoconfparse2.ccp_util import PythonOptimizeCheck from fixtures.devices.mock_cisco import start_cisco_mock, stop_cisco_mock
18,528
tacacs-server host 10.0.0.32 tacacs-server host 10.0.0.33 aaa group server tacacs+ TACACS_GROUP server 10.0.0.32 server 10.0.0.33 use-vrf management source-interface mgmt0 aaa authentication login default group TACACS_GROUP aaa authentication login console group TACACS_GROUP aaa authorization commands default group TACACS_GROUP aaa accounting default group TACACS_GROUP aaa authentication login error-enable logging event link-status default vpc domain 999 role priority 100 system-priority 1 auto-recovery peer-keepalive destination 1.1.1.2 fex 115 desc FEX115 pinning max-links 1 interface loopback0 ip address 10.1.1.1/32 interface mgmt0 ip address 10.0.0.5/24 interface port-channel1 vpc peer-link switchport mode trunk spanning-tree port type network description [vPC PEER LINK] interface port-channel21 description Uplink to core switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,155 mtu 9216 vpc 21 interface port-channel115 switchport mode fex-fabric fex associate 115 interface Ethernet1/1 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/2 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/3 ip address 192.0.2.0/31 interface Ethernet1/4 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/5 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/6 switchport mode fex-fabric fex associate 115 channel-group 115 interface Ethernet1/7 switchport mode access switchport access vlan 100 mtu 9216 interface Ethernet1/8 switchport mode access switchport access vlan 102 mtu 9216 interface Ethernet1/9 ip address 10.1.2.6/30 mtu 9216 interface Ethernet1/10 encapsulation dot1Q 200 bandwidth 100000000 delay 200 beacon ip address 10.1.2.2/30 mpls ip mtu 9216 """.splitlines() @pytest.fixture(scope="session") def c01_default_gigethernets(request): yield config_c01_default_gige @pytest.fixture(scope="session") def c01_insert_serial_replace(request): yield config_c01_insert_serial_replace @pytest.fixture(scope="function") def parse_c01(request): """Preparsed c01"""
r""" conftest.py - Parse, Query, Build, and Modify IOS-style configs Copyright (C) 2023 David Michael Pennington at Cisco Systems This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. If you need to contact the author, you can do so by emailing: mike [~at~] pennington [/dot\] net """ sys.path.insert(0, "..") c01 = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! interface Serial 1/0 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/3 switchport switchport access vlan 100 switchport voice vlan 150 ! interface GigabitEthernet4/4 shutdown ! interface GigabitEthernet4/5 switchport switchport access vlan 110 ! interface GigabitEthernet4/6 switchport switchport access vlan 110 ! interface GigabitEthernet4/7 switchport switchport access vlan 110 ! interface GigabitEthernet4/8 switchport switchport access vlan 110 ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. ^C alias exec showthang show ip route vrf THANG""".splitlines() config_c01_default_gige = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! interface Serial 1/0 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! default interface GigabitEthernet4/1 interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! default interface GigabitEthernet4/2 interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! default interface GigabitEthernet4/3 interface GigabitEthernet4/3 switchport switchport access vlan 100 switchport voice vlan 150 ! default interface GigabitEthernet4/4 interface GigabitEthernet4/4 shutdown ! default interface GigabitEthernet4/5 interface GigabitEthernet4/5 switchport switchport access vlan 110 ! default interface GigabitEthernet4/6 interface GigabitEthernet4/6 switchport switchport access vlan 110 ! default interface GigabitEthernet4/7 interface GigabitEthernet4/7 switchport switchport access vlan 110 ! default interface GigabitEthernet4/8 interface GigabitEthernet4/8 switchport switchport access vlan 110 ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. 
^C alias exec showthang show ip route vrf THANG""".splitlines() config_c01_insert_serial_replace = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! default interface Serial 2/0 interface Serial 2/0 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/3 switchport switchport access vlan 100 switchport voice vlan 150 ! interface GigabitEthernet4/4 shutdown ! interface GigabitEthernet4/5 switchport switchport access vlan 110 ! interface GigabitEthernet4/6 switchport switchport access vlan 110 ! interface GigabitEthernet4/7 switchport switchport access vlan 110 ! interface GigabitEthernet4/8 switchport switchport access vlan 110 ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. ^C alias exec showthang show ip route vrf THANG""".splitlines() # A smaller version of c01... c02 = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 !""".splitlines() ## For historical reasons, I'm use c03 for configs/sample_01.ios (i.e. c01 was ## already taken) c03 = """! service timestamps debug datetime msec localtime show-timezone service timestamps log datetime msec localtime show-timezone ! errdisable recovery cause bpduguard errdisable recovery interval 400 ! aaa new-model ! ip vrf TEST_100_001 route-target 100:1 rd 100:1 ! interface Serial 1/0 description Uplink to SBC F923X2K425 bandwidth 1500 clock rate 1500 delay 70 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! interface Serial 1/1 description Uplink to AT&T encapsulation hdlc ip address 1.1.1.9 255.255.255.254 hold-queue 1000 in hold-queue 1000 out mpls mtu 1540 ip mtu 1500 mpls ip ! interface GigabitEthernet4/1 description switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 speed 100 duplex full ! interface GigabitEthernet4/3 mtu 9216 switchport switchport access vlan 100 switchport voice vlan 150 ! interface GigabitEthernet4/4 shutdown ! interface GigabitEthernet4/5 switchport switchport access vlan 110 switchport port-security switchport port-security maximum 3 switchport port-security mac-address sticky switchport port-security mac-address 1000.2000.3000 switchport port-security mac-address 1000.2000.3001 switchport port-security mac-address 1000.2000.3002 switchport port-security violation shutdown ! 
interface GigabitEthernet4/6 description Simulate a Catalyst6500 access port switchport switchport access vlan 110 switchport mode access switchport nonegotiate switchport port-security switchport port-security maximum 2 switchport port-security violation restrict switchport port-security aging type inactivity switchport port-security aging time 5 spanning-tree portfast spanning-tree portfast bpduguard storm-control action shutdown storm-control broadcast level 0.40 storm-control multicast level 0.35 ! interface GigabitEthernet4/7 description Dot1Q trunk allowing vlans 2-4,7,10,11-19,21-4094 switchport switchport trunk encapsulation dot1q switchport mode trunk switchport trunk native vlan 4094 switchport trunk allowed vlan remove 1,5-10,20 switchport trunk allowed vlan add 7,10 switchport nonegotiate ! interface GigabitEthernet4/8.120 no switchport encapsulation dot1q 120 ip vrf forwarding TEST_100_001 ip address 1.1.2.254 255.255.255.0 ! interface ATM5/0/0 no ip address no ip redirects no ip unreachables no ip proxy-arp load-interval 30 carrier-delay msec 100 no atm ilmi-keepalive bundle-enable max-reserved-bandwidth 100 hold-queue 500 in ! interface ATM5/0/0.32 point-to-point ip address 1.1.1.5 255.255.255.252 no ip redirects no ip unreachables no ip proxy-arp ip accounting access-violations pvc 0/32 vbr-nrt 704 704 ! interface ATM5/0/1 shutdown ! router ospf 100 vrf TEST_100_001 router-id 1.1.2.254 network 1.1.2.0 0.0.0.255 area 0 ! policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. ^C ! 
alias exec showthang show ip route vrf THANG""".splitlines() f01 = """ltm virtual ACME { destination 192.168.1.191:http ip-protocol tcp mask 255.255.255.255 pool pool1 profiles { http { } tcp { } } rules { MOBILE } source 0.0.0.0/0 source-address-translation { type automap } translate-address enabled translate-port enabled vs-index 17 }""".splitlines() # Using configs/sample_02.f5 f02 = """ ltm profile udp DNS-UDP { app-service none datagram-load-balancing disabled idle-timeout 31 } ltm rule contrail-monitor { when HTTP_REQUEST { if {[active_members APN-DNS-TCP] > 0 & [active_members APN-DNS-UDP] > 0 } { HTTP::respond 200 content "up" } } } ltm rule contrail-monitor1 { when HTTP_REQUEST { if {[active_members APN-DNS-TCP] >= 0 & [active_members APN-DNS-UDP] >= 0 } { HTTP::respond 200 content "up" } } } ltm tacdb licenseddb licensed-tacdb { partition none } ltm virtual ACME_VIP { destination 192.168.1.191:http ip-protocol tcp mask 255.255.255.255 pool pool1 profiles { http { } tcp { } } rules { MOBILE } source 0.0.0.0/0 source-address-translation { type automap } translate-address enabled translate-port enabled vs-index 17 } sys state-mirroring { } sys syslog { include " template t_remotetmpl { template (\"<$PRI>$STAMP $HOST $FACILITY[$PID]: $MSGONLY\"); template_escape(no); }; filter f_remote_loghost { level(info..emerg); }; destination d_remote_loghost { udp(\"102.223.51.181\" port(519) template(t_remotetmpl)); }; log { source(s_syslog_pipe); filter(f_remote_loghost); destination(d_remote_loghost); }; " remote-servers { JSA { host 102.223.51.181 } } } sys url-db download-schedule urldb { } """.splitlines() j01 = """## Last commit: 2015-06-28 13:00:59 CST by mpenning system { host-name TEST01_EX; domain-name pennington.net; domain-search [ pennington.net lab.pennington.net ]; location { country-code 001; building HQ_005; floor 1; } root-authentication { encrypted-password "$1$y7ArHxKU$zUbdeLfBirgkCsKiOJ5Qa0"; ## SECRET-DATA } name-server { 172.16.3.222; } login { announcement "Test Lab Switch"; message "Unauthorized access is prohibited"; user mpenning { full-name "Mike Pennington"; uid 1000; class super-user; authentication { encrypted-password "$1$y7ArHxKU$zUbdeLfBirgkCsKiOJ5Qa0"; ## SECRET-DATA } } } services { ssh { root-login allow; } telnet; web-management { http; } } syslog { user * { any emergency; } file messages { any notice; authorization info; } file interactive-commands { interactive-commands any; } } ntp { Management { vlan-id 1; interface { ge-0/0/0.0; ge-0/0/1.0; ge-0/0/2.0; ge-0/0/3.0; } } VLAN_FOO { vlan-id 5; } vlan1 { vlan-id 1; l3-interface vlan.1; } vlan800 { vlan-id 800; } } ethernet-switching-options { storm-control { interface all; } } interfaces { ge-0/0/0 { unit 0 { family ethernet-switching { port-mode access; vlan { members VLAN_FOO; } } } } ge-0/0/1 { unit 0 { family ethernet-switching { port-mode trunk; vlan { members all; } native-vlan-id 1; } } } vlan { unit 0 { family inet { address 172.16.15.5/22; } } } } routing-options { static { route 0.0.0.0/0 next-hop 172.16.12.1; route 192.168.36.0/25 next-hop 172.16.12.1; } }""".splitlines() a01 = """hostname TEST-FW ! name 1.1.2.20 loghost01 name 1.1.3.10 dmzsrv00 name 1.1.3.11 dmzsrv01 name 1.1.3.12 dmzsrv02 name 1.1.3.13 dmzsrv03 ! interface Ethernet0/0 description Uplink to SBC F923X2K425 nameif OUTSIDE security-level 0 delay 70 ip address 1.1.1.1 255.255.255.252 ! interface Ethernet0/1 nameif INSIDE security-level 100 ip address 1.1.2.1 255.255.255.0 ! interface Ethernet0/2 switchport access vlan 100 ! 
interface VLAN100 nameif DMZ security-level 50 ip address 1.1.3.1 255.255.255.0 ! object-group network ANY_addrs network-object 0.0.0.0 0.0.0.0 ! object-group network INSIDE_addrs1 network-object host 1.1.2.1 network-object 1.1.2.2 255.255.255.255 network-object 1.1.2.0 255.255.255.0 ! object-group network INSIDE_addrs1 network-object host 1.1.2.1 network-object 1.1.2.2 255.255.255.255 network-object 1.1.2.0 255.255.255.0 ! object-group service DNS_svc service-object udp destination eq dns ! object-group service NTP_svc service-object udp destination eq ntp ! object-group service FTP_svc service-object tcp destination eq ftp ! object-group service HTTP_svc service-object tcp destination eq http ! object-group service HTTPS_svc service-object tcp destination eq https ! access-list INSIDE_in extended permit object-group FTP_svc object-group INSIDE_addrs1 object-group ANY_addrs log access-list INSIDE_in remark Overlap for test purposes access-list INSIDE_in extended permit ip object-group INSIDE_addrs1 object-group ANY_addrs log access-list INSIDE_in extended deny ip any any log ! ! clock timezone CST -6 clock summer-time CDT recurring ! logging enable logging timestamp logging buffer-size 1048576 logging buffered informational logging trap informational logging asdm informational logging facility 22 logging host INSIDE loghost01 no logging message 302021 ! access-group OUTSIDE_in in interface OUTSIDE access-group INSIDE_in in interface INSIDE !""".splitlines() a02 = """: Saved : Written by mpenning at 05:37:43.184 CDT Sun Jun 29 2015 ! ASA Version 9.0(3) ! command-alias exec slog show log | i Deny|denied command-alias exec sacl sh access-list INSIDE_out | e hitcnt=0 |remark|elements hostname fw domain-name pennington.net enable password 2KFQnbNIdI.2KYOU encrypted xlate per-session deny tcp any4 any4 xlate per-session deny tcp any4 any6 xlate per-session deny tcp any6 any4 xlate per-session deny tcp any6 any6 xlate per-session deny udp any4 any4 eq domain xlate per-session deny udp any4 any6 eq domain xlate per-session deny udp any6 any4 eq domain xlate per-session deny udp any6 any6 eq domain passwd 2KFQnbNIdI.2KYOU encrypted names name 192.0.2.13 Machine01 description machine01 name 192.0.2.17 Machine02_Windows name 10.0.0.6 Machine03 name 74.125.130.125 GTalk01 description Google talk server name 74.125.134.125 GTalk02 description Google talk server name 74.125.139.125 GTalk03 description Google Talk server name 74.125.142.125 GTalk04 description Google Talk server name 74.125.192.125 GTalk05 description Google Talk server name 74.125.140.125 GTalk06 description Google Talk server name 74.125.137.125 GTalk07 name 74.125.138.125 GTalk08 name 74.125.141.125 GTalk09 name 74.125.136.125 GTalk10 name 74.125.135.125 GTalk11 name 108.160.160.0 AS19679_Dropbox__108-160-160-0__20 name 199.47.216.0 AS19679_Dropbox__199.47.216.0__22 name 173.194.64.109 GmailSMTP01 name 173.194.64.108 GmailSMTP02 name 128.223.51.103 route-views.oregon-ix.net description Route-Views route server ip local pool SSL_VPN_ADDRS 10.1.1.240-10.1.1.241 mask 255.255.255.0 ! interface Ethernet0/0 description Internet ISP switchport access vlan 100 ! interface Ethernet0/1 switchport access vlan 200 ! interface Ethernet0/2 switchport access vlan 200 shutdown ! interface Ethernet0/3 switchport access vlan 200 ! interface Ethernet0/4 switchport access vlan 200 ! interface Ethernet0/5 switchport access vlan 200 ! interface Ethernet0/6 switchport access vlan 200 ! interface Ethernet0/7 shutdown ! 
interface Vlan1 no nameif no security-level no ip address ! interface Vlan100 mac-address 0030.dead.beef nameif OUTSIDE security-level 0 ip address dhcp setroute ! interface Vlan200 nameif INSIDE security-level 100 ip address 192.0.2.1 255.255.255.0 ! banner motd banner motd Test banner for $(hostname) banner motd banner motd ******************************* boot system disk0:/asa903-k8.bin ftp mode passive clock timezone CST -6 clock summer-time CDT recurring dns domain-lookup INSIDE dns server-group DefaultDNS name-server Machine01 domain-name pennington.net object network GTalk01 host 74.125.130.125 description Created during name migration object network GTalk02 host 74.125.134.125 description Created during name migration object network GTalk03 host 74.125.139.125 description Created during name migration object network GTalk04 host 74.125.142.125 description Created during name migration object network GTalk05 host 74.125.192.125 description Created during name migration object network GTalk06 host 74.125.140.125 description Created during name migration object network GTalk07 host 74.125.137.125 description Created during name migration object network GTalk08 host 74.125.138.125 description Created during name migration object network GTalk09 host 74.125.141.125 description Created during name migration object network GTalk10 host 74.125.136.125 description Created during name migration object network GTalk11 host 74.125.135.125 description Created during name migration object network AS19679_Dropbox__108-160-160-0__20 subnet 108.160.160.0 255.255.240.0 description Created during name migration object network AS19679_Dropbox__199.47.216.0__22 subnet 199.47.216.0 255.255.252.0 description Created during name migration object network Machine01 host 192.0.2.5 description Created during name migration object network obj_any subnet 0.0.0.0 0.0.0.0 object network Machine02_Windows host 192.0.2.17 description Created during name migration object-group network GoogleTalk network-object object GTalk01 network-object object GTalk02 network-object object GTalk03 network-object object GTalk04 network-object object GTalk05 network-object object GTalk06 network-object object GTalk07 network-object object GTalk08 network-object object GTalk09 network-object object GTalk10 network-object object GTalk11 object-group service GoogleTalkPorts service-object tcp destination eq 5222 service-object tcp destination eq https service-object udp destination range 19302 19309 object-group network Inside network-object 192.0.2.0 255.255.255.0 network-object 192.0.22.0 255.255.255.0 network-object 192.0.23.0 255.255.255.0 object-group network DROPBOX_AS19679 network-object object AS19679_Dropbox__108-160-160-0__20 network-object object AS19679_Dropbox__199.47.216.0__22 object-group network GOOGLE_addrs description dig -t TXT _netblocks.google.com 8.8.8.8 network-object 216.239.32.0 255.255.224.0 network-object 64.233.160.0 255.255.224.0 network-object 66.249.80.0 255.255.240.0 network-object 72.14.192.0 255.255.192.0 network-object 209.85.128.0 255.255.128.0 network-object 66.102.0.0 255.255.240.0 network-object 74.125.0.0 255.255.0.0 network-object 64.18.0.0 255.255.240.0 network-object 207.126.144.0 255.255.240.0 network-object 173.194.0.0 255.255.0.0 object-group network SSH_addrs network-object 192.168.1.0 255.255.255.0 object-group network ANY_addrs network-object 0.0.0.0 0.0.0.0 object-group network INSIDE_addrs network-object 192.0.2.0 255.255.255.0 network-object 10.0.0.0 255.0.0.0 object-group service 
GOOGLE_svc description Google's push service for Android service-object tcp destination eq www service-object tcp destination eq https service-object tcp destination eq 5228 service-object tcp destination eq 5222 service-object tcp destination eq 587 object-group service TELNET_svc service-object tcp destination eq telnet object-group service WHOIS_svc service-object tcp destination eq whois object-group service SSH_svc service-object tcp destination eq ssh object-group service WEB_svc description Standard web services - http, https, ftp service-object tcp destination eq ftp service-object tcp destination eq www service-object tcp destination eq https service-object icmp object-group service DNS_svc service-object udp destination eq domain service-object tcp destination eq domain object-group network MACHINE01_addrs network-object object Machine01 object-group service ANDROID_svc description Google's push service for Android service-object tcp destination eq 5228 object-group service GMAILSMTP_svc service-object tcp destination eq 2525 object-group service NTP_svc service-object udp destination eq ntp object-group service SKYPE_svc service-object udp destination eq 5555 object-group service XBOX_svc service-object tcp destination eq domain service-object udp destination eq domain service-object udp destination eq 88 service-object tcp destination eq 3074 service-object udp destination eq 3074 object-group network ANY object-group service NaverLine_svc service-object udp destination eq 11000 service-object udp destination range 9401 9405 object-group network NaverLine_addrs network-object 174.35.127.0 255.255.255.0 object-group network Facebook_addrs network-object 66.220.144.0 255.255.240.0 network-object 69.63.176.0 255.255.248.0 network-object 69.63.184.0 255.255.248.0 network-object 69.171.224.0 255.255.240.0 network-object 69.171.239.0 255.255.255.0 network-object 69.171.240.0 255.255.240.0 network-object 69.171.253.0 255.255.255.0 network-object 69.171.255.0 255.255.255.0 network-object 74.119.76.0 255.255.252.0 network-object 103.4.96.0 255.255.252.0 network-object 173.252.64.0 255.255.192.0 network-object 204.15.20.0 255.255.252.0 network-object 31.13.24.0 255.255.248.0 network-object 31.13.64.0 255.255.192.0 network-object 31.13.96.0 255.255.224.0 object-group service IP_SLA_PathTrace_svc service-object udp destination range 33400 33499 object-group service FTP_svc service-object tcp destination eq ftp object-group service TeamViewerPorts service-object tcp destination eq 5938 object-group service SSLVPN_svc service-object udp destination eq 443 object-group service TEST_PORTS tcp port-object eq domain port-object eq smtp access-list SPLIT_TUNNEL_NETS remark [[ destinations available via the VPN ]] access-list SPLIT_TUNNEL_NETS standard permit 192.0.2.0 255.255.255.0 access-list NO_SSLVPN_NAT remark [[ prevent inadvertent nat of sslvpn traffic ]] access-list NO_SSLVPN_NAT extended permit ip 192.0.2.0 255.255.255.0 192.0.2.0 255.255.255.0 access-list INSIDE_in extended deny object-group SKYPE_svc object-group INSIDE_addrs object-group ANY_addrs log disable access-list INSIDE_in extended permit object-group GOOGLE_svc object-group INSIDE_addrs object-group GOOGLE_addrs log access-list INSIDE_in extended permit object-group ANDROID_svc object-group INSIDE_addrs object-group GOOGLE_addrs log access-list INSIDE_in extended permit object-group IP_SLA_PathTrace_svc any host 4.2.2.2 log access-list INSIDE_in extended permit object-group DNS_svc object-group INSIDE_addrs object-group 
ANY_addrs log access-list INSIDE_in extended permit object-group NTP_svc object-group INSIDE_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group TELNET_svc object-group INSIDE_addrs host 128.223.51.103 log access-list INSIDE_in extended permit object-group FTP_svc object-group INSIDE_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group WEB_svc object-group INSIDE_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group SSH_svc object-group INSIDE_addrs object-group SSH_addrs log access-list INSIDE_in extended permit object-group GMAILSMTP_svc object-group TSUNAMI_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group WHOIS_svc object-group TSUNAMI_addrs object-group ANY_addrs log access-list INSIDE_in extended deny ip any4 any4 log access-list ANY extended permit ip object-group Inside any4 access-list ANY extended permit ip any4 object-group Inside access-list VOIP extended permit object-group GoogleTalkPorts object-group Inside object-group GoogleTalk access-list VOIP extended permit object-group GoogleTalkPorts object-group GoogleTalk object-group Inside access-list MAINTENANCE extended deny ip any4 any4 log access-list OUTSIDE_in extended deny ip host 4.2.2.2 any4 log access-list OUTSIDE_in extended permit icmp any4 0.0.0.0 0.0.0.0 unreachable log interval 1 access-list OUTSIDE_in extended permit icmp any4 0.0.0.0 0.0.0.0 time-exceeded log interval 1 access-list OUTSIDE_in extended deny ip any4 any4 log pager lines 23 logging enable logging timestamp logging buffer-size 1048576 logging buffered informational logging trap informational logging asdm informational logging facility 22 logging host INSIDE Machine01 logging class sys buffered informational no logging message 302021 no logging message 302020 mtu OUTSIDE 1500 mtu INSIDE 1500 ip verify reverse-path interface INSIDE icmp unreachable rate-limit 1 burst-size 1 asdm image disk0:/asdm-645.bin no asdm history enable arp timeout 14400 no arp permit-nonconnected ! 
object network obj_any nat (INSIDE,OUTSIDE) dynamic interface access-group OUTSIDE_in in interface OUTSIDE access-group INSIDE_in in interface INSIDE route INSIDE 10.0.0.0 255.0.0.0 192.0.2.2 1 timeout xlate 3:00:00 timeout pat-xlate 0:00:30 timeout conn 1:00:00 half-closed 0:59:00 udp 0:02:00 icmp 0:00:02 timeout sunrpc 0:10:00 h323 0:05:00 h225 1:00:00 mgcp 0:05:00 mgcp-pat 0:05:00 timeout sip 0:30:00 sip_media 0:02:00 sip-invite 0:03:00 sip-disconnect 0:02:00 timeout sip-provisional-media 0:02:00 uauth 0:05:00 absolute timeout tcp-proxy-reassembly 0:01:00 timeout floating-conn 0:00:00 dynamic-access-policy-record DfltAccessPolicy user-identity default-domain LOCAL aaa authentication ssh console LOCAL aaa authentication enable console LOCAL aaa authentication http console LOCAL aaa authorization command LOCAL aaa local authentication attempts max-fail 16 filter java 1-65535 192.0.2.0 255.255.255.0 0.0.0.0 0.0.0.0 http server enable http 192.0.2.0 255.255.255.0 INSIDE snmp-server host INSIDE Machine01 poll community public snmp-server location ServerRoom snmp-server contact mike@pennington.net snmp-server community public snmp-server enable traps snmp authentication linkup linkdown coldstart crypto ipsec security-association pmtu-aging infinite crypto ca trustpoint LOCAL_CERT_fw enrollment self fqdn fw.pennington.net subject-name CN=fw.pennington.net crl configure crypto ca trustpool policy telnet timeout 5 ssh scopy enable ssh 192.0.2.0 255.255.255.0 INSIDE ssh 10.0.0.0 255.0.0.0 INSIDE ssh timeout 60 ssh version 2 console timeout 5 no vpn-addr-assign aaa no vpn-addr-assign dhcp dhcpd dns 68.94.156.1 Machine01 dhcpd lease 604800 dhcpd domain pennington.net dhcpd auto_config OUTSIDE ! threat-detection basic-threat threat-detection scanning-threat shun duration 30 threat-detection statistics host threat-detection statistics port threat-detection statistics protocol threat-detection statistics access-list no threat-detection statistics tcp-intercept ntp server 17.151.16.20 ntp server 17.151.16.21 ntp server 17.151.16.22 ntp server 17.151.16.23 group-policy SSL_VPN_Policy01 internal group-policy SSL_VPN_Policy01 attributes dns-server value 192.0.2.13 vpn-idle-timeout none vpn-filter none vpn-tunnel-protocol ssl-client ssl-clientless split-tunnel-policy tunnelspecified split-tunnel-network-list value SPLIT_TUNNEL_NETS default-domain value pennington.net webvpn anyconnect keep-installer installed anyconnect ssl rekey time 30 anyconnect ssl rekey method ssl anyconnect ask none default anyconnect username mpenning password dXRTaA5wrZ3OL8gz encrypted privilege 15 tunnel-group DefaultWEBVPNGroup general-attributes address-pool SSL_VPN_ADDRS default-group-policy SSL_VPN_Policy01 ! ! policy-map type inspect dns preset_dns_map parameters message-length maximum client auto message-length maximum 512 policy-map global_policy class inspection_default inspect dns preset_dns_map inspect h323 h225 inspect h323 ras inspect rsh inspect rtsp inspect esmtp inspect sqlnet inspect skinny inspect sunrpc inspect xdmcp inspect sip inspect netbios inspect tftp inspect ip-options inspect icmp inspect http ! 
service-policy global_policy global prompt hostname context no call-home reporting anonymous call-home profile CiscoTAC-1 no active destination address http https://tools.cisco.com/its/service/oddce/services/DDCEService destination address email callhome@cisco.com destination transport-method http subscribe-to-alert-group diagnostic subscribe-to-alert-group environment subscribe-to-alert-group inventory periodic monthly subscribe-to-alert-group configuration periodic monthly subscribe-to-alert-group telemetry periodic daily Cryptochecksum:571d01b7b08342e35db838e9acec00f6 : end""".splitlines() n01 = """ feature tacacs+ feature interface-vlan feature vpc feature fex feature lacp feature lldp feature ospf no feature telnet ip domain-lookup ip domain-name pennington.net ip name-server 10.0.0.10 vrf context management ip route 0.0.0.0/0 10.0.0.1 vrf context vpc-peerkeepalive tacacs-server key 0 DontTreadOnMe ip tacacs source-interface Vlan10 tacacs-server host 10.0.0.32 tacacs-server host 10.0.0.33 aaa group server tacacs+ TACACS_GROUP server 10.0.0.32 server 10.0.0.33 use-vrf management source-interface mgmt0 aaa authentication login default group TACACS_GROUP aaa authentication login console group TACACS_GROUP aaa authorization commands default group TACACS_GROUP aaa accounting default group TACACS_GROUP aaa authentication login error-enable logging event link-status default vpc domain 999 role priority 100 system-priority 1 auto-recovery peer-keepalive destination 1.1.1.2 fex 115 desc FEX115 pinning max-links 1 interface loopback0 ip address 10.1.1.1/32 interface mgmt0 ip address 10.0.0.5/24 interface port-channel1 vpc peer-link switchport mode trunk spanning-tree port type network description [vPC PEER LINK] interface port-channel21 description Uplink to core switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,155 mtu 9216 vpc 21 interface port-channel115 switchport mode fex-fabric fex associate 115 interface Ethernet1/1 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/2 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/3 ip address 192.0.2.0/31 interface Ethernet1/4 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/5 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/6 switchport mode fex-fabric fex associate 115 channel-group 115 interface Ethernet1/7 switchport mode access switchport access vlan 100 mtu 9216 interface Ethernet1/8 switchport mode access switchport access vlan 102 mtu 9216 interface Ethernet1/9 ip address 10.1.2.6/30 mtu 9216 interface Ethernet1/10 encapsulation dot1Q 200 bandwidth 100000000 delay 200 beacon ip address 10.1.2.2/30 mpls ip mtu 9216 """.splitlines() @pytest.fixture(scope="session") def c01_default_gigethernets(request): yield config_c01_default_gige @pytest.fixture(scope="session") def c01_insert_serial_replace(request): yield config_c01_insert_serial_replace @pytest.fixture(scope="function") def parse_c01(request): """Preparsed c01"""
parse_c01 = CiscoConfParse(c01, factory=False)
0
2023-12-01 18:43:27+00:00
24k
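The record above ends with pytest fixtures that wrap the raw ASA and NX-OS configurations in CiscoConfParse(c01, factory=False), which is exactly what its next_line continues. A minimal sketch, assuming the ciscoconfparse package is installed, of how such a parsed fixture is typically queried in tests; the config list here is a short stand-in for the much longer c01/n01 configs in the sample:

from ciscoconfparse import CiscoConfParse

# stand-in for the c01/n01 fixture configs above
config = [
    "interface Ethernet1/1",
    " switchport mode trunk",
    " channel-group 1 mode active",
    "interface Ethernet1/3",
    " ip address 192.0.2.0/31",
]
parse = CiscoConfParse(config, factory=False)

# find_objects() matches a regex against each config line and returns objects
# whose .children expose the indented lines nested beneath them
for intf in parse.find_objects(r"^interface"):
    trunked = any("switchport mode trunk" in child.text for child in intf.children)
    print(intf.text, "->", "trunk" if trunked else "routed/access")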
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HO...
from typing import Any, Optional, cast from zerolink import settings from zerolink.exc import APIError, AuthenticationError from zerolink_client import Client from zerolink_client.api.default import finetune, get_models_models_get from zerolink_client.api.entity import ( desc_entity_id, desc_entity_ontology, lookup_entity, lookup_relation, search_entity, ) from zerolink_client.api.extract import extract_text from zerolink_client.api.fact import ( create_userattribute, create_userentity, create_userrule, create_usertriple, ) from zerolink_client.api.kg import get_triple from zerolink_client.api.question import post_question from zerolink_client.api.session import ( create_session, get_session_entities, get_session_facts, get_user_session, ) from zerolink_client.api.user import create_user from zerolink_client.models import ( ChatSession, CreateAttribute, CreateEntity, CreateRule, CreateRuleResponse, CreateTriple, CreateTuneJobResponse, Entity, HTTPValidationError, Question, QuestionResponse, TextExtract, ) from zerolink_client.types import File, UNSET
16,468
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep.user_id def post_session(user_id: str, **kwargs) -> Optional[ChatSession]: """ Create a new session. """ check_api_key() if user_id is None: user_id = settings.api_key rep = create_session.sync(client=client, user_id=user_id, **kwargs) if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep def get_session_name(user_id: str, session_name: str, **kwargs): """ Lookup a session by user and name. """ check_api_key() rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs) if rep.status_code == 200: return rep.parsed elif rep.status_code == 404: return None else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_entities_list(session_id: int, **kwargs): """ Get the entities of a session. """ check_api_key() rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_facts_list(session_id: int, **kwargs): """ Get the facts of a session. """ check_api_key() rep = get_session_facts.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def ask_question( session_id: Optional[int], body: str, assumps: Optional[dict[str, Any]] = None, **kwargs, ) -> QuestionResponse: """ Ask a question. """ check_api_key() rep = post_question.sync_detailed( client=client, session_id=(session_id or UNSET), body=Question(body=body, **(assumps or {})), **kwargs, ) if rep.status_code == 200: return cast(QuestionResponse, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err)
def get_entity_id(id: str, **kwargs) -> Entity:
30
2023-12-03 07:50:04+00:00
24k
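Nearly every endpoint wrapper in the zerolink req.py above repeats the same shape: check the API key, call a generated *_detailed endpoint, return .parsed on HTTP 200, and raise APIError with the decoded body otherwise. A minimal sketch of that pattern using a hypothetical unwrap() helper and a fake response object — neither exists in zerolink; they only illustrate the repeated logic:

from dataclasses import dataclass
from typing import Any, Callable

class APIError(Exception):
    """Stand-in for zerolink.exc.APIError."""

@dataclass
class FakeResponse:
    # mimics the status_code / parsed / content trio used throughout req.py
    status_code: int
    parsed: Any = None
    content: bytes = b""

def unwrap(call: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
    rep = call(*args, **kwargs)   # e.g. get_session_facts.sync_detailed(...)
    if rep.status_code == 200:
        return rep.parsed
    err = rep.content.decode("utf-8")
    print(err)                    # req.py prints the body before raising
    raise APIError(err)

print(unwrap(lambda: FakeResponse(status_code=200, parsed={"facts": []})))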
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss...
import torch from torch import autocast from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer from nnunetv2.utilities.helpers import dummy_context from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from torch.nn.parallel import DistributedDataParallel as DDP
17,114
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions:
loss = DC_and_BCE_loss({},
0
2023-12-04 19:43:14+00:00
24k
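The next_line of this record continues the has_regions branch of _build_loss: with overlapping region labels each output channel is an independent sigmoid target, so soft Dice is paired with BCE, while mutually exclusive labels pair it with CE instead. A sketch of that branch under stated assumptions — the DC_and_BCE_loss signature follows the context snippet above, but the soft-Dice kwargs and the DC_and_CE_loss call are simplified placeholders, not the trainer's exact configuration:

from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss
from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss

def build_loss(label_manager, batch_dice: bool, is_ddp: bool):
    soft_dice_kwargs = {"batch_dice": batch_dice, "smooth": 1e-5,
                        "do_bg": True, "ddp": is_ddp}
    if label_manager.has_regions:
        # overlapping regions -> per-channel sigmoid targets, Dice + BCE
        return DC_and_BCE_loss({}, soft_dice_kwargs,
                               use_ignore_label=label_manager.ignore_label is not None,
                               dice_class=MemoryEfficientSoftDiceLoss)
    # mutually exclusive classes -> softmax targets, Dice + CE
    return DC_and_CE_loss(soft_dice_kwargs, {},
                          ignore_label=label_manager.ignore_label,
                          dice_class=MemoryEfficientSoftDiceLoss)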
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/neuralhmm_tts.py
[ { "identifier": "Encoder", "path": "TTS/tts/layers/overflow/common_layers.py", "snippet": "class Encoder(nn.Module):\n r\"\"\"Neural HMM Encoder\n\n Same as Tacotron 2 encoder but increases the input length by states per phone\n\n Args:\n num_chars (int): Number of characters in the inpu...
import os import torch from typing import Dict, List, Union from coqpit import Coqpit from torch import nn from trainer.logging.tensorboard_logger import TensorboardLogger from TTS.tts.layers.overflow.common_layers import Encoder, OverflowUtils from TTS.tts.layers.overflow.neural_hmm import NeuralHMM from TTS.tts.layers.overflow.plotting_utils import ( get_spec_from_most_probable_state, plot_transition_probabilities_to_numpy, ) from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.text.tokenizer import TTSTokenizer from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.generic_utils import format_aux_input from TTS.utils.io import load_fsspec from TTS.utils.audio import AudioProcessor
18,911
@torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. """ default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose) tokenizer, new_config = TTSTokenizer.init_from_config(config) speaker_manager = SpeakerManager.init_from_config(config, samples) return NeuralhmmTTS(new_config, ap, tokenizer, speaker_manager) def load_checkpoint( self, config: Coqpit, checkpoint_path: str, eval: bool = False, strict: bool = True, cache=False ): # pylint: disable=unused-argument, redefined-builtin state = load_fsspec(checkpoint_path, map_location=torch.device("cpu")) self.load_state_dict(state["model"]) if eval: self.eval() assert not self.training def on_init_start(self, trainer): """If the current dataset does not have normalisation statistics and initialisation transition_probability it computes them otherwise loads.""" if not os.path.isfile(trainer.config.mel_statistics_parameter_path) or trainer.config.force_generate_statistics: dataloader = trainer.get_train_dataloader( training_assets=None, samples=trainer.train_samples, verbose=False ) print( f" | > Data parameters not found for: {trainer.config.mel_statistics_parameter_path}. Computing mel normalization parameters..." 
) data_mean, data_std, init_transition_prob = OverflowUtils.get_data_parameters_for_flat_start( dataloader, trainer.config.out_channels, trainer.config.state_per_phone ) print( f" | > Saving data parameters to: {trainer.config.mel_statistics_parameter_path}: value: {data_mean, data_std, init_transition_prob}" ) statistics = { "mean": data_mean.item(), "std": data_std.item(), "init_transition_prob": init_transition_prob.item(), } torch.save(statistics, trainer.config.mel_statistics_parameter_path) else: print( f" | > Data parameters found for: {trainer.config.mel_statistics_parameter_path}. Loading mel normalization parameters..." ) statistics = torch.load(trainer.config.mel_statistics_parameter_path) data_mean, data_std, init_transition_prob = ( statistics["mean"], statistics["std"], statistics["init_transition_prob"], ) print(f" | > Data parameters loaded with value: {data_mean, data_std, init_transition_prob}") trainer.config.flat_start_params["transition_p"] = ( init_transition_prob.item() if torch.is_tensor(init_transition_prob) else init_transition_prob ) OverflowUtils.update_flat_start_transition(trainer.model, init_transition_prob) trainer.model.update_mean_std(statistics) @torch.inference_mode() def _create_logs(self, batch, outputs, ap): # pylint: disable=no-self-use, unused-argument alignments, transition_vectors = outputs["alignments"], outputs["transition_vectors"] means = torch.stack(outputs["means"], dim=1) figures = { "alignment": plot_alignment(alignments[0].exp(), title="Forward alignment", fig_size=(20, 20)), "log_alignment": plot_alignment( alignments[0].exp(), title="Forward log alignment", plot_log=True, fig_size=(20, 20) ), "transition_vectors": plot_alignment(transition_vectors[0], title="Transition vectors", fig_size=(20, 20)),
class NeuralhmmTTS(BaseTTS): """Neural HMM TTS model. Paper:: https://arxiv.org/abs/2108.13320 Paper abstract:: Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using HMMs.However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without approximation. We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate. Audio examples and code are available at https://shivammehta25.github.io/Neural-HMM/ . Note: - This is a parameter efficient version of OverFlow (15.3M vs 28.6M). Since it has half the number of parameters as OverFlow the synthesis output quality is suboptimal (but comparable to Tacotron2 without Postnet), but it learns to speak with even lesser amount of data and is still significantly faster than other attention-based methods. - Neural HMMs uses flat start initialization i.e it computes the means and std and transition probabilities of the dataset and uses them to initialize the model. This benefits the model and helps with faster learning If you change the dataset or want to regenerate the parameters change the `force_generate_statistics` and `mel_statistics_parameter_path` accordingly. - To enable multi-GPU training, set the `use_grad_checkpointing=False` in config. This will significantly increase the memory usage. This is because to compute the actual data likelihood (not an approximation using MAS/Viterbi) we must use all the states at the previous time step during the forward pass to decide the probability distribution at the current step i.e the difference between the forward algorithm and viterbi approximation. Check :class:`TTS.tts.configs.neuralhmm_tts_config.NeuralhmmTTSConfig` for class arguments. 
""" def __init__( self, config: "NeuralhmmTTSConfig", ap: "AudioProcessor" = None, tokenizer: "TTSTokenizer" = None, speaker_manager: SpeakerManager = None, ): super().__init__(config, ap, tokenizer, speaker_manager) # pass all config fields to `self` # for fewer code change self.config = config for key in config: setattr(self, key, config[key]) self.encoder = Encoder(config.num_chars, config.state_per_phone, config.encoder_in_out_features) self.neural_hmm = NeuralHMM( frame_channels=self.out_channels, ar_order=self.ar_order, deterministic_transition=self.deterministic_transition, encoder_dim=self.encoder_in_out_features, prenet_type=self.prenet_type, prenet_dim=self.prenet_dim, prenet_n_layers=self.prenet_n_layers, prenet_dropout=self.prenet_dropout, prenet_dropout_at_inference=self.prenet_dropout_at_inference, memory_rnn_dim=self.memory_rnn_dim, outputnet_size=self.outputnet_size, flat_start_params=self.flat_start_params, std_floor=self.std_floor, use_grad_checkpointing=self.use_grad_checkpointing, ) self.register_buffer("mean", torch.tensor(0)) self.register_buffer("std", torch.tensor(1)) def update_mean_std(self, statistics_dict: Dict): self.mean.data = torch.tensor(statistics_dict["mean"]) self.std.data = torch.tensor(statistics_dict["std"]) def preprocess_batch(self, text, text_len, mels, mel_len): if self.mean.item() == 0 or self.std.item() == 1: statistics_dict = torch.load(self.mel_statistics_parameter_path) self.update_mean_std(statistics_dict) mels = self.normalize(mels) return text, text_len, mels, mel_len def normalize(self, x): return x.sub(self.mean).div(self.std) def inverse_normalize(self, x): return x.mul(self.std).add(self.mean) def forward(self, text, text_len, mels, mel_len): """ Forward pass for training and computing the log likelihood of a given batch. Shapes: Shapes: text: :math:`[B, T_in]` text_len: :math:`[B]` mels: :math:`[B, T_out, C]` mel_len: :math:`[B]` """ text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len) encoder_outputs, encoder_output_len = self.encoder(text, text_len) log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm( encoder_outputs, encoder_output_len, mels.transpose(1, 2), mel_len ) outputs = { "log_probs": log_probs, "alignments": fwd_alignments, "transition_vectors": transition_vectors, "means": means, } return outputs @staticmethod def _training_stats(batch): stats = {} stats["avg_text_length"] = batch["text_lengths"].float().mean() stats["avg_spec_length"] = batch["mel_lengths"].float().mean() stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean() stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean() return stats def train_step(self, batch: dict, criterion: nn.Module): text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] outputs = self.forward( text=text_input, text_len=text_lengths, mels=mel_input, mel_len=mel_lengths, ) loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum())) # for printing useful statistics on terminal loss_dict.update(self._training_stats(batch)) return outputs, loss_dict def eval_step(self, batch: Dict, criterion: nn.Module): return self.train_step(batch, criterion) def _format_aux_input(self, aux_input: Dict, default_input_dict): """Set missing fields to their default value. Args: aux_inputs (Dict): Dictionary containing the auxiliary inputs. 
""" default_input_dict = default_input_dict.copy() default_input_dict.update( { "sampling_temp": self.sampling_temp, "max_sampling_time": self.max_sampling_time, "duration_threshold": self.duration_threshold, } ) if aux_input: return format_aux_input(default_input_dict, aux_input) return default_input_dict @torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. """ default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose) tokenizer, new_config = TTSTokenizer.init_from_config(config) speaker_manager = SpeakerManager.init_from_config(config, samples) return NeuralhmmTTS(new_config, ap, tokenizer, speaker_manager) def load_checkpoint( self, config: Coqpit, checkpoint_path: str, eval: bool = False, strict: bool = True, cache=False ): # pylint: disable=unused-argument, redefined-builtin state = load_fsspec(checkpoint_path, map_location=torch.device("cpu")) self.load_state_dict(state["model"]) if eval: self.eval() assert not self.training def on_init_start(self, trainer): """If the current dataset does not have normalisation statistics and initialisation transition_probability it computes them otherwise loads.""" if not os.path.isfile(trainer.config.mel_statistics_parameter_path) or trainer.config.force_generate_statistics: dataloader = trainer.get_train_dataloader( training_assets=None, samples=trainer.train_samples, verbose=False ) print( f" | > Data parameters not found for: {trainer.config.mel_statistics_parameter_path}. Computing mel normalization parameters..." 
) data_mean, data_std, init_transition_prob = OverflowUtils.get_data_parameters_for_flat_start( dataloader, trainer.config.out_channels, trainer.config.state_per_phone ) print( f" | > Saving data parameters to: {trainer.config.mel_statistics_parameter_path}: value: {data_mean, data_std, init_transition_prob}" ) statistics = { "mean": data_mean.item(), "std": data_std.item(), "init_transition_prob": init_transition_prob.item(), } torch.save(statistics, trainer.config.mel_statistics_parameter_path) else: print( f" | > Data parameters found for: {trainer.config.mel_statistics_parameter_path}. Loading mel normalization parameters..." ) statistics = torch.load(trainer.config.mel_statistics_parameter_path) data_mean, data_std, init_transition_prob = ( statistics["mean"], statistics["std"], statistics["init_transition_prob"], ) print(f" | > Data parameters loaded with value: {data_mean, data_std, init_transition_prob}") trainer.config.flat_start_params["transition_p"] = ( init_transition_prob.item() if torch.is_tensor(init_transition_prob) else init_transition_prob ) OverflowUtils.update_flat_start_transition(trainer.model, init_transition_prob) trainer.model.update_mean_std(statistics) @torch.inference_mode() def _create_logs(self, batch, outputs, ap): # pylint: disable=no-self-use, unused-argument alignments, transition_vectors = outputs["alignments"], outputs["transition_vectors"] means = torch.stack(outputs["means"], dim=1) figures = { "alignment": plot_alignment(alignments[0].exp(), title="Forward alignment", fig_size=(20, 20)), "log_alignment": plot_alignment( alignments[0].exp(), title="Forward log alignment", plot_log=True, fig_size=(20, 20) ), "transition_vectors": plot_alignment(transition_vectors[0], title="Transition vectors", fig_size=(20, 20)),
"mel_from_most_probable_state": plot_spectrogram(
9
2023-11-29 08:15:06+00:00
24k
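One reusable detail in the NeuralhmmTTS model above is how it handles mel normalisation: the dataset mean and std live in registered buffers, so they are saved with checkpoints and moved by .to(device) but never trained; targets are normalised before the HMM and sampled outputs de-normalised at inference. A minimal self-contained sketch of that pattern — the class name and statistics values here are illustrative only:

import torch
from torch import nn

class MelNormalizer(nn.Module):
    def __init__(self):
        super().__init__()
        # buffers, not nn.Parameters: persisted in state_dict, excluded from optimisation
        self.register_buffer("mean", torch.tensor(0.0))
        self.register_buffer("std", torch.tensor(1.0))

    def update_mean_std(self, statistics: dict):
        self.mean.data = torch.tensor(statistics["mean"])
        self.std.data = torch.tensor(statistics["std"])

    def normalize(self, x):           # applied to training targets
        return x.sub(self.mean).div(self.std)

    def inverse_normalize(self, x):   # applied to sampled outputs
        return x.mul(self.std).add(self.mean)

norm = MelNormalizer()
norm.update_mean_std({"mean": -5.5, "std": 2.1})
mels = torch.randn(2, 80, 100)
assert torch.allclose(norm.inverse_normalize(norm.normalize(mels)), mels, atol=1e-5)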
magic-research/magic-animate
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,468
v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) 
_mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size)
2
2023-11-21 08:33:54+00:00
24k
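The AnimationPipeline's next_step() above implements deterministic DDIM inversion: from the noise prediction at timestep t it reconstructs pred_x0, then steps forward to the next, noisier timestep. A standalone sketch of just that update rule; the beta schedule and tensor shapes are toy values, not the pipeline's actual scheduler state:

import torch

def ddim_inversion_step(x, noise_pred, alpha_t, alpha_next):
    # beta_prod_t = 1 - alpha_prod_t in the pipeline's notation
    pred_x0 = (x - (1 - alpha_t) ** 0.5 * noise_pred) / alpha_t ** 0.5
    pred_dir = (1 - alpha_next) ** 0.5 * noise_pred
    x_next = alpha_next ** 0.5 * pred_x0 + pred_dir
    return x_next, pred_x0

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x = torch.randn(1, 4, 8, 8)                  # latent at timestep t
eps = torch.randn_like(x)                    # stand-in for the UNet's prediction
x_next, pred_x0 = ddim_inversion_step(x, eps, alphas_cumprod[10], alphas_cumprod[30])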
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword argu...
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
16,098
            scores.append(value)
            time.sleep(0.1)

        result = np.mean(scores)
        log(logger.info, f"Perspective is {result:.6f}")
        self.output["perspective"] = {
            "mean": result,
            "std": np.std(scores),
        }
        return result

    def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs):
        """
        Generates samples from the model.

        Args:
            max_tokens (int): The maximum number of tokens to be used for evaluation.
            batch_size (int, optional): The batch size to be used for evaluation.
            temperature (float, optional): The temperature to be used for sampling.
            top_p (float, optional): The top-p value to be used for sampling.
            top_k (int, optional): The top-k value to be used for sampling.
            stop_texts (list, optional): The list of texts at which sampling should be stopped
            speculation (bool, optional): Whether to use speculation or not.
            **kwargs: Additional keyword arguments.
        """
        start_time = time.time()
        if "generated" not in self.dataset.columns:
            texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size,
                                                 temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts,
                                                 do_speculation=speculation)
            self.dataset["generated"] = texts
        end_time = time.time()

        self.output["time"] = {
            "total_time": end_time - start_time,
            "time_per_sample": (end_time - start_time) / len(self.dataset),
            "dataset_size": len(self.dataset),
            "max_tokens": max_tokens,
            "batch_size": batch_size
        }

    def save_generated(self, output_location):
        """
        Saves the generated samples to the specified location.

        Args:
            output_location (string): The location to save the generated samples.
        """
        log(logger.debug, f"Saving generated samples to {output_location}")
        self.dataset.to_csv(output_location)

    def get_perplexity(self, dataset, model, tokenizer, **kwargs):
        """
        Calculates the perplexity of the generated sentences.

        Args:
            dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text).
            model (PreTrainedModel): The model to be evaluated.
            tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences.
            **kwargs: Additional keyword arguments.
        """
        perplexities = []
        sum_nllos = 0
        n_tokens = 0

        for index, sample in dataset.iterrows():
            input_sentence = sample['input']
            sentence = sample['generated']
            if len(sentence) == 0:
                continue
            combined_sentence = input_sentence + sentence
            encodings = tokenizer(combined_sentence, return_tensors='pt')
            input_ids = encodings['input_ids'].to(model.device)
            attention_mask = encodings['attention_mask'].to(model.device)
            input_encodings = tokenizer(input_sentence, return_tensors='pt')
            input_ids_inputs = input_encodings['input_ids']
            input_length = input_ids_inputs.size(1)

            with torch.no_grad():
                output = model(input_ids, labels=input_ids, attention_mask=attention_mask)

            logprobs = output.logits[0, :].log_softmax(dim=-1)
            loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum')
            loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous())
            loss = loss.to(torch.float32).detach().cpu().numpy()

            n_tokens_here = input_ids.shape[-1] - input_length - 1
            if n_tokens_here > 0:
                perplexity = np.exp(loss / n_tokens_here)
                sum_nllos += loss
                n_tokens += n_tokens_here
                if not np.isnan(perplexity):
                    perplexities.append(perplexity)

        average = np.mean(perplexities)
        median = np.median(perplexities)
        real = np.exp(sum_nllos / n_tokens)

        return {
            "average": average,
            "median": median,
            "correct_perplexity": real
        }

    def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs):
        """
        Calculates the perplexity of the generated sentences.

        Args:
            dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text).
            model_name_fluency (string, optional): The name of the model to be used for calculating fluency.
            dtype (torch.dtype, optional): The data type to be used for the model.
            **kwargs: Additional keyword arguments.
        """
        log(logger.info, "Calculating fluency")
        if "perplexity" in self.output:
            log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}")
            return self.output["perplexity"]

        tokenizer = load_tokenizer(model_name_fluency)
load_dotenv()


class Evaluation(BaseClass):
    """
    This class is used for evaluating a model's performance on a given dataset.
    It includes methods for preparing the dataset, evaluating the model, generating samples,
    calculating perplexity and faithfulness of the model.
    """
    def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs):
        """
        Initialize the Evaluation class with the given parameters.

        Args:
            generator (ModelArithmetic, optional): The model to be evaluated.
            dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain columns "text", "input", "output" and "label" ("label", "input", "output" optional)
            dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain columns "text", "input", "output" and "label" ("label", "input", "output" optional)
            train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned.
            train_dataset_location (string, optional): The location of the dataset to be used for training the model.
            n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input".
            bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint.
            **kwargs: Additional keyword arguments.
        """
        self.has_input_task = True
        self.dataset = None
        if dataset is not None:
            self.dataset = dataset.copy()
        elif dataset_location is not None:
            self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n")

        if train_dataset is not None:
            self.train_dataset = train_dataset
        elif train_dataset_location is not None:
            self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n")
        else:
            self.train_dataset = None

        if self.dataset is not None:
            self.prepare_dataset(n_input_words)

        super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task,
                         output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None)

        if isinstance(generator, ModelArithmetic):
            # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable
            del self.kwargs["generator"]
            self.kwargs["formula"] = generator.formula
            self.formula = generator.formula

    def prepare_dataset(self, n_input_words=5):
        """
        Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output.
        If the dataset does not have a label column, it assumes all labels are 1.

        Args:
            n_input_words (int): The number of input words to be used.
        """
        log(logger.debug, "Preparing dataset")
        if "input" not in self.dataset.columns:
            log(logger.debug, f"No input column found, assuming input is the first {n_input_words} words of the output")
            self.dataset["input"] = self.dataset["text"].apply(lambda x: " ".join(x.split()[:n_input_words]))
            self.dataset["output"] = self.dataset["text"].apply(lambda x: " " + " ".join(x.split()[n_input_words:]))
            self.has_input_task = False

        if "label" not in self.dataset.columns:
            log(logger.debug, "No label column found, assuming all labels are 1")
            self.dataset["label"] = 1

    def evaluate_lm_eval(self, model, task_name, batch_size, num_fewshot, model_args, no_cache=False, limit=None, write_out=False, output_folder=None, **kwargs):
        """
        Evaluates the model using the lm_eval package.

        Args:
            model (PreTrainedModel): The model to be evaluated.
            task_name (string): The name of the task for evaluation.
            batch_size (int): The batch size to be used for evaluation.
            num_fewshot (int): The number of fewshot examples to be used for evaluation.
            model_args (dict): The arguments to be passed to the model.
            no_cache (bool, optional): Whether to use cached results or not.
            limit (int, optional): The maximum number of examples to be used for evaluation.
            write_out (bool, optional): Whether to write out the results or not.
            output_folder (string, optional): The folder to write out the results.
            **kwargs: Additional keyword arguments.
        """
        # The try body below is restored from this record's import_statement field
        # (the flattened snapshot had an empty try block, which does not parse).
        try:
            from lm_eval import evaluator
        except ImportError:
            raise ImportError("Please install lm_eval to run this function")
        results = evaluator.simple_evaluate(
            model=model,
            model_args=model_args,
            tasks=[task_name],
            num_fewshot=num_fewshot,
            batch_size=batch_size,
            device="cuda" if torch.cuda.is_available() else "cpu",
            no_cache=no_cache,
            limit=limit,
            write_out=write_out,
            output_base_path=output_folder
        )
        if "lm_eval" in self.output:
            self.output["lm_eval"][task_name] = results
        else:
            self.output["lm_eval"] = {task_name: results}

    def evaluate(self, max_tokens=128, store_file=None, reload=True, dataset_file=None, reload_data=True, preserve_memory=False, batch_size=1, do_perspective=True, speculation=False, only_faithfulness=False, **kwargs):
        """
        Evaluates the model on the dataset and calculates the perplexity and faithfulness

        Args:
            max_tokens (int, optional): The maximum number of tokens to be used for evaluation.
            store_file (string, optional): The file to store the evaluation results.
            reload (bool, optional): Whether to reload the dataset or not if it was stored before.
            dataset_file (string, optional): The file containing the dataset. If path exists, dataset is loaded from path. If path does not exist, dataset is saved to path.
            reload_data (bool, optional): Whether to reload the data or not if it was stored before.
            preserve_memory (bool, optional): Whether to preserve memory or not.
            batch_size (int, optional): The batch size to be used for evaluation.
            do_perspective (bool, optional): Whether to calculate the perspective score or not.
            speculation (bool, optional): Whether to use speculation or not.
            **kwargs: Additional keyword arguments.
        """
        if store_file is not None:
            os.makedirs(os.path.dirname(store_file), exist_ok=True)
        if dataset_file is not None:
            os.makedirs(os.path.dirname(dataset_file), exist_ok=True)

        if (reload_data or reload) and dataset_file is not None and os.path.isfile(dataset_file):
            log(logger.debug, f"Reloading dataset from {dataset_file}")
            self.dataset = pd.read_csv(dataset_file, escapechar='\\', lineterminator="\n")
            self.dataset.fillna("", inplace=True)
        else:
            log(logger.debug, "Generating samples")
            self.generate_samples(max_tokens, batch_size=batch_size, speculation=speculation, **kwargs)
            if dataset_file is not None:
                log(logger.info, f"Saving dataset to {dataset_file}")
                self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n")

        if reload and store_file is not None and os.path.isfile(store_file):
            self.output = json.load(open(store_file))["output"]
            log(logger.info, f"Reloading output. Output is {self.output}")
        if not reload:
            self.output = dict()

        self.dataset.dropna(inplace=True)

        if preserve_memory and hasattr(self.generator, "clear_memory"):  # can have weird side effects!
            self.generator.clear_memory()

        if not only_faithfulness:
            self.perplexity(self.dataset, **kwargs)
        else:
            del self.output['faithfulness']
        self.faithfulness_multiple(self.dataset, **kwargs)
        if do_perspective and not only_faithfulness:
            self.perspective(**kwargs)

        if dataset_file is not None:
            log(logger.info, f"Saving dataset to {dataset_file}")
            self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n")

        self.extra_kwargs = {kwarg: str(kwargs[kwarg]) for kwarg in kwargs}
        self.extra_kwargs["max_tokens"] = max_tokens

        if store_file is not None:
            log(logger.info, f"Saving output to {store_file}")
            self.save(store_file)

        return self.output

    def perspective(self, classification_with_input=True, **kwargs):
        """
        Calculates the perspective score of the generated sentences.

        Args:
            classification_with_input (bool, optional): Whether to use the input of the sentence for calculating the perspective score or not.
            **kwargs: Additional keyword arguments.
        """
        retries = 0
        if "perspective" in self.output:
            log(logger.info, f"Reloading perspective. Perspective is {self.output['perspective']}")
            return self.output["perspective"]
        log(logger.info, "Calculating perspective")
        service = discovery.build('commentanalyzer', 'v1alpha1',
                                  developerKey=os.environ["PERSPECTIVE_API_KEY"],
                                  discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
                                  static_discovery=False)
        scores = []
        self.dataset["perspective"] = [None] * len(self.dataset)
        for index, sample in self.dataset.iterrows():
            sentence = sample['generated']
            if classification_with_input:
                sentence = sample['input'] + sentence
            analyze_request = {
                'comment': {'text': sentence},
                'languages': ["en"],
                'requestedAttributes': {'TOXICITY': {}}
            }
            while retries < 10 ** 5:
                try:
                    response = service.comments().analyze(body=analyze_request).execute()
                    break
                except Exception as e:
                    if "LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE" in str(e):
                        continue
                    log(logger.warning, f"Exception {e} occurred, retrying...")
                    retries += 1
                    time.sleep(10)
            if retries == 10 ** 5:
                log(logger.error, "Could not get all perspective scores")
                break
            value = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
            self.dataset.at[index, "perspective"] = value
            scores.append(value)
            time.sleep(0.1)

        result = np.mean(scores)
        log(logger.info, f"Perspective is {result:.6f}")
        self.output["perspective"] = {
            "mean": result,
            "std": np.std(scores),
        }
        return result

    def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs):
        """
        Generates samples from the model.

        Args:
            max_tokens (int): The maximum number of tokens to be used for evaluation.
            batch_size (int, optional): The batch size to be used for evaluation.
            temperature (float, optional): The temperature to be used for sampling.
            top_p (float, optional): The top-p value to be used for sampling.
            top_k (int, optional): The top-k value to be used for sampling.
            stop_texts (list, optional): The list of texts at which sampling should be stopped
            speculation (bool, optional): Whether to use speculation or not.
            **kwargs: Additional keyword arguments.
        """
        start_time = time.time()
        if "generated" not in self.dataset.columns:
            texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size,
                                                 temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts,
                                                 do_speculation=speculation)
            self.dataset["generated"] = texts
        end_time = time.time()

        self.output["time"] = {
            "total_time": end_time - start_time,
            "time_per_sample": (end_time - start_time) / len(self.dataset),
            "dataset_size": len(self.dataset),
            "max_tokens": max_tokens,
            "batch_size": batch_size
        }

    def save_generated(self, output_location):
        """
        Saves the generated samples to the specified location.

        Args:
            output_location (string): The location to save the generated samples.
        """
        log(logger.debug, f"Saving generated samples to {output_location}")
        self.dataset.to_csv(output_location)

    def get_perplexity(self, dataset, model, tokenizer, **kwargs):
        """
        Calculates the perplexity of the generated sentences.

        Args:
            dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text).
            model (PreTrainedModel): The model to be evaluated.
            tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences.
            **kwargs: Additional keyword arguments.
        """
        perplexities = []
        sum_nllos = 0
        n_tokens = 0

        for index, sample in dataset.iterrows():
            input_sentence = sample['input']
            sentence = sample['generated']
            if len(sentence) == 0:
                continue
            combined_sentence = input_sentence + sentence
            encodings = tokenizer(combined_sentence, return_tensors='pt')
            input_ids = encodings['input_ids'].to(model.device)
            attention_mask = encodings['attention_mask'].to(model.device)
            input_encodings = tokenizer(input_sentence, return_tensors='pt')
            input_ids_inputs = input_encodings['input_ids']
            input_length = input_ids_inputs.size(1)

            with torch.no_grad():
                output = model(input_ids, labels=input_ids, attention_mask=attention_mask)

            logprobs = output.logits[0, :].log_softmax(dim=-1)
            loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum')
            loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous())
            loss = loss.to(torch.float32).detach().cpu().numpy()

            n_tokens_here = input_ids.shape[-1] - input_length - 1
            if n_tokens_here > 0:
                perplexity = np.exp(loss / n_tokens_here)
                sum_nllos += loss
                n_tokens += n_tokens_here
                if not np.isnan(perplexity):
                    perplexities.append(perplexity)

        average = np.mean(perplexities)
        median = np.median(perplexities)
        real = np.exp(sum_nllos / n_tokens)

        return {
            "average": average,
            "median": median,
            "correct_perplexity": real
        }

    def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs):
        """
        Calculates the perplexity of the generated sentences.

        Args:
            dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text).
            model_name_fluency (string, optional): The name of the model to be used for calculating fluency.
            dtype (torch.dtype, optional): The data type to be used for the model.
            **kwargs: Additional keyword arguments.
        """
        log(logger.info, "Calculating fluency")
        if "perplexity" in self.output:
            log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}")
            return self.output["perplexity"]

        tokenizer = load_tokenizer(model_name_fluency)
model = load_model(model_name_fluency, dtype=dtype)
2
2023-11-21 20:01:08+00:00
24k
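Aside: the conditional perplexity that `get_perplexity` computes in the record above reduces to the following minimal, self-contained sketch. The `gpt2` checkpoint and the helper name are illustrative placeholders, not part of the dataset record, and the sketch shares the record's caveat that tokenizing the prompt separately can split BPE boundaries differently than tokenizing prompt plus continuation together.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    def continuation_perplexity(model, tokenizer, prompt: str, continuation: str) -> float:
        # Score only the continuation tokens, conditioned on the prompt.
        input_ids = tokenizer(prompt + continuation, return_tensors="pt")["input_ids"].to(model.device)
        prompt_len = tokenizer(prompt, return_tensors="pt")["input_ids"].size(1)
        with torch.no_grad():
            logits = model(input_ids).logits
        log_probs = logits[:, :-1].log_softmax(dim=-1)   # position i predicts token i+1
        targets = input_ids[:, 1:]
        nll = -log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
        cont_nll = nll[:, prompt_len - 1:]               # NLLs of continuation tokens only
        return torch.exp(cont_nll.mean()).item()

    model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder checkpoint
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    print(continuation_perplexity(model, tokenizer, "The capital of France is", " Paris."))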
HeliosZhao/Animate124
dnerf/renderer.py
[ { "identifier": "custom_meshgrid", "path": "nerf/utils.py", "snippet": "def custom_meshgrid(*args):\n # ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid\n if pver.parse(torch.__version__) < pver.parse('1.10'):\n return torch.meshgrid(*args...
import os
import math
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
import logging
from tqdm import tqdm
from nerf.utils import custom_meshgrid, safe_normalize
from nerf.renderer import NeRFRenderer
15,610
            if normals is not None:
                # _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)
                # results['normal_image'] = normal_image.view(*prefix, 3)
                normal_image = torch.stack(normal_image, dim=0) # F,N,3
                results['normal_image'] = normal_image.view(batch_size, num_frames, -1, 3) # B,F,N,3

            # weights normalization
            results['weights'] = weights # N'*F

        else:
            image_all = []
            weights_sum_all = []
            depth_all = []
            # ipdb.set_trace()
            for frame_idx in range(num_frames):
                _rays_o, _rays_d, _light_d, t = rays_o[frame_idx], rays_d[frame_idx], light_d[frame_idx], time_steps[0, frame_idx].item()
                nears, fars = raymarching.near_far_from_aabb(_rays_o, _rays_d, self.aabb_train if self.training else self.aabb_infer)

                # allocate outputs
                dtype = torch.float32
                weights_sum = torch.zeros(N, dtype=dtype, device=device)
                depth = torch.zeros(N, dtype=dtype, device=device)
                image = torch.zeros(N, 3, dtype=dtype, device=device)

                n_alive = N
                rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]
                rays_t = nears.clone() # [N]

                ## test must use the same
                step = 0

                while step < self.opt.max_steps: # hard coded max step

                    # count alive rays
                    n_alive = rays_alive.shape[0]

                    # exit loop
                    if n_alive <= 0:
                        break

                    # decide compact_steps
                    n_step = max(min(N // n_alive, 8), 1)

                    xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, _rays_o, _rays_d, self.bound, self.density_bitfield[t], self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)
                    dirs = safe_normalize(dirs)
                    s_time = torch.zeros((xyzs.size(0), 1), device=xyzs.device, dtype=xyzs.dtype) + time[0, frame_idx].item()
                    # ipdb.set_trace()
                    sigmas, rgbs, normals, _ = self(xyzs, dirs, _light_d[:1], ratio=ambient_ratio, shading=shading, t=s_time)
                    sigmas = self.density_scale * sigmas

                    raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)

                    rays_alive = rays_alive[rays_alive >= 0]
                    #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')

                    step += n_step

                image_all.append(image)
                weights_sum_all.append(weights_sum)
                depth_all.append(depth)

            # ipdb.set_trace()
            weights_sum = torch.stack(weights_sum_all, dim=0) # F,N
            depth = torch.stack(depth_all, dim=0) # F,N
            image = torch.stack(image_all, dim=0) # F,N,3

        # mix background color
        ## when bg_radius < 0 -> the way Magic123, during training, bg_color is always a random color, during inference, always 1
        # ipdb.set_trace()
        if bg_color is None:
            if self.opt.bg_radius > 0 and self.bg_net is not None:
                # use the bg model to calculate bg_color
                ## NOTE here the camera should be fixed in the video
                ## rays_d F,N,3
                bg_color = self.background(rays_d.reshape(-1, 3)) # [FN, 3] # this is irrelavant to time
                bg_color = bg_color.reshape(batch_size, num_frames, -1, 3) # F,N,3
            else:
                bg_color = 1

        image_wo_bg = image.view(batch_size, num_frames, -1, 3)
        image = image + (1 - weights_sum).unsqueeze(-1) * bg_color
        image = image.view(batch_size, num_frames, -1, 3)

        depth = depth.view(batch_size, num_frames, -1)

        weights_sum = weights_sum.reshape(batch_size, num_frames, -1)

        results['image'] = image # B,F,N,3
        results['depth'] = depth # B,F,N
        results['weights_sum'] = weights_sum # B,F,N
        results['image_wo_bg'] = image_wo_bg # B,F,N,3
        # ipdb.set_trace()
        return results

    @torch.no_grad()
    def update_extra_state(self, decay=0.95, S=128):
        # call before each epoch to update extra states.
        if not (self.cuda_ray):
            return
        if self.taichi_ray:
            raise NotImplementedError

        ### update density grid
        tmp_grid = - torch.ones_like(self.density_grid)

        # full update.
        # if self.iter_density < 16: # update only 16 times
        if True: # full update
            X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
            Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
            Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)

            for t, time in enumerate(self.times):
                for xs in X:
                    for ys in Y:
                        for zs in Z:
                            # construct points
logger = logging.getLogger(__name__)


class DNeRFRenderer(NeRFRenderer):
    def __init__(self, opt):
        super().__init__(opt)

        self.time_size = opt.get("time_size", 1)
        self.density_scale = opt.get("density_scale", 1)
        self.dynamic_ft = opt.get("dynamic_ft", False)

        # extra state for cuda raymarching
        if self.cuda_ray:
            # density grid (with an extra time dimension)
            density_grid = torch.zeros(self.time_size, self.cascade, self.grid_size ** 3) # [T, CAS, H * H * H]
            density_bitfield = torch.zeros(self.time_size, self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [T, CAS * H * H * H // 8]
            self.register_buffer('density_grid', density_grid)
            self.register_buffer('density_bitfield', density_bitfield)
            self.mean_density = 0
            self.iter_density = 0

            # time stamps for density grid
            times = ((torch.arange(self.time_size, dtype=torch.float32) + 0.5) / self.time_size).view(-1, 1, 1) # [T, 1, 1]
            self.register_buffer('times', times)

            # step counter
            step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging...
            self.register_buffer('step_counter', step_counter)
            self.mean_count = 0
            self.local_step = 0

    def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, time=None, **kwargs):
        # rays_o, rays_d: [B, N, 3] / B,F,N,3
        # return: image: [B, N, 3], depth: [B, N]
        # time: [B,F]
        prefix = rays_o.shape[:-1]
        batch_size = prefix[0]
        if prefix[0] != 1:
            raise "The prefix should be 1 if different frames has different camera pose in the current version"
        dynamic_cam = True if rays_o.ndim == 4 else False

        N = rays_o.shape[:-1].numel() # B * N, in fact
        device = rays_o.device

        if light_d is None:
            # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)
            light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [B,N,3] / B,F,N,3

        if time is None:
            assert not self.dynamic_ft
            time_steps = torch.LongTensor([[0]]).reshape(1, 1) # 1,1
            time = torch.FloatTensor([[0]]).reshape(1, 1) # 1,1
            num_frames = 1
        else:
            time_steps = torch.floor(time * self.time_size).clamp(min=0, max=self.time_size - 1).long() # B,F
            num_frames = time.size(1)

        if dynamic_cam:
            rays_o = rays_o[0].contiguous() # F,N,3
            rays_d = rays_d[0].contiguous() # F,N,3
            light_d = light_d[0].contiguous() # F,N,3
        else:
            rays_o = rays_o.repeat(num_frames, 1, 1).contiguous()
            rays_d = rays_d.repeat(num_frames, 1, 1).contiguous()
            light_d = light_d.repeat(num_frames, 1, 1).contiguous()
        # ipdb.set_trace()
        results = {}
        # ipdb.set_trace()
        if self.training:
            # ipdb.set_trace()
            v_xyzs = []
            v_dirs = []
            v_light = []
            v_time = []
            v_idx = [0]
            v_rays = []
            v_ts = []
            v_kernels = []
            for frame_idx in range(num_frames):
                _rays_o, _rays_d, _light_d, t = rays_o[frame_idx], rays_d[frame_idx], light_d[frame_idx], time_steps[0, frame_idx].item() ## N,3 for the first 3, t is a value
                # pre-calculate near far
                nears, fars = raymarching.near_far_from_aabb(_rays_o, _rays_d, self.aabb_train if self.training else self.aabb_infer)
                xyzs, dirs, ts, rays = raymarching.march_rays_train(_rays_o, _rays_d, self.bound, self.density_bitfield[t], self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)
                dirs = safe_normalize(dirs)

                flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()
                if _light_d.shape[0] > 1:
                    _light_d = _light_d[flatten_rays]
                else: # 1,3
                    _light_d = _light_d.repeat(xyzs.size(0), 1)

                v_xyzs.append(xyzs)
                v_dirs.append(dirs)
                v_light.append(_light_d)
                sample_num = xyzs.size(0)
                v_time.append(torch.zeros((sample_num, 1), device=xyzs.device, dtype=xyzs.dtype) + time[0, frame_idx].item()) # NOTE this should be real time # N
                v_idx.append(sample_num)
                v_rays.append(rays)
                v_ts.append(ts)

            v_xyzs = torch.cat(v_xyzs, dim=0)
            v_dirs = torch.cat(v_dirs, dim=0)
            v_light = torch.cat(v_light, dim=0)
            v_time = torch.cat(v_time, dim=0)
            v_idx = np.cumsum(v_idx).tolist()

            sigmas, rgbs, normals, deforms = self(v_xyzs, v_dirs, v_light, ratio=ambient_ratio, shading=shading, t=v_time)
            sigmas = self.density_scale * sigmas

            weights = []
            weights_sum = []
            depth = []
            image = []
            normal_image = []
            # ipdb.set_trace()
            for frame_idx in range(num_frames):
                start_idx, end_idx = v_idx[frame_idx], v_idx[frame_idx + 1]
                # _weights, _weights_sum, _depth, _image
                out = raymarching.composite_rays_train(sigmas[start_idx:end_idx], rgbs[start_idx:end_idx], v_ts[frame_idx], v_rays[frame_idx], T_thresh, binarize)
                if normals is not None:
                    _, _, _, _normal_image = raymarching.composite_rays_train(sigmas[start_idx:end_idx].detach(), (normals[start_idx:end_idx] + 1) / 2, v_ts[frame_idx], v_rays[frame_idx], T_thresh, binarize)
                    normal_image.append(_normal_image)
                weights.append(out[0])
                weights_sum.append(out[1])
                depth.append(out[2])
                image.append(out[3]) # N,3

            weights = torch.cat(weights)
            weights_sum = torch.stack(weights_sum, dim=0) # F,N
            depth = torch.stack(depth, dim=0) # F,N
            image = torch.stack(image, dim=0) # F,N,3

            # normals related regularizations
            if self.opt.lambda_orient > 0 and normals is not None:
                # orientation loss
                loss_orient = weights.detach() * (normals * v_dirs).sum(-1).clamp(min=0) ** 2
                results['loss_orient'] = loss_orient.mean()

            if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:
                normals_perturb = self.normal(v_xyzs + torch.randn_like(v_xyzs) * 1e-2, t=v_time)
                results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()

            if normals is not None:
                # _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)
                # results['normal_image'] = normal_image.view(*prefix, 3)
                normal_image = torch.stack(normal_image, dim=0) # F,N,3
                results['normal_image'] = normal_image.view(batch_size, num_frames, -1, 3) # B,F,N,3

            # weights normalization
            results['weights'] = weights # N'*F

        else:
            image_all = []
            weights_sum_all = []
            depth_all = []
            # ipdb.set_trace()
            for frame_idx in range(num_frames):
                _rays_o, _rays_d, _light_d, t = rays_o[frame_idx], rays_d[frame_idx], light_d[frame_idx], time_steps[0, frame_idx].item()
                nears, fars = raymarching.near_far_from_aabb(_rays_o, _rays_d, self.aabb_train if self.training else self.aabb_infer)

                # allocate outputs
                dtype = torch.float32
                weights_sum = torch.zeros(N, dtype=dtype, device=device)
                depth = torch.zeros(N, dtype=dtype, device=device)
                image = torch.zeros(N, 3, dtype=dtype, device=device)

                n_alive = N
                rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]
                rays_t = nears.clone() # [N]

                ## test must use the same
                step = 0

                while step < self.opt.max_steps: # hard coded max step

                    # count alive rays
                    n_alive = rays_alive.shape[0]

                    # exit loop
                    if n_alive <= 0:
                        break

                    # decide compact_steps
                    n_step = max(min(N // n_alive, 8), 1)

                    xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, _rays_o, _rays_d, self.bound, self.density_bitfield[t], self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)
                    dirs = safe_normalize(dirs)
                    s_time = torch.zeros((xyzs.size(0), 1), device=xyzs.device, dtype=xyzs.dtype) + time[0, frame_idx].item()
                    # ipdb.set_trace()
                    sigmas, rgbs, normals, _ = self(xyzs, dirs, _light_d[:1], ratio=ambient_ratio, shading=shading, t=s_time)
                    sigmas = self.density_scale * sigmas

                    raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)

                    rays_alive = rays_alive[rays_alive >= 0]
                    #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')

                    step += n_step

                image_all.append(image)
                weights_sum_all.append(weights_sum)
                depth_all.append(depth)

            # ipdb.set_trace()
            weights_sum = torch.stack(weights_sum_all, dim=0) # F,N
            depth = torch.stack(depth_all, dim=0) # F,N
            image = torch.stack(image_all, dim=0) # F,N,3

        # mix background color
        ## when bg_radius < 0 -> the way Magic123, during training, bg_color is always a random color, during inference, always 1
        # ipdb.set_trace()
        if bg_color is None:
            if self.opt.bg_radius > 0 and self.bg_net is not None:
                # use the bg model to calculate bg_color
                ## NOTE here the camera should be fixed in the video
                ## rays_d F,N,3
                bg_color = self.background(rays_d.reshape(-1, 3)) # [FN, 3] # this is irrelavant to time
                bg_color = bg_color.reshape(batch_size, num_frames, -1, 3) # F,N,3
            else:
                bg_color = 1

        image_wo_bg = image.view(batch_size, num_frames, -1, 3)
        image = image + (1 - weights_sum).unsqueeze(-1) * bg_color
        image = image.view(batch_size, num_frames, -1, 3)

        depth = depth.view(batch_size, num_frames, -1)

        weights_sum = weights_sum.reshape(batch_size, num_frames, -1)

        results['image'] = image # B,F,N,3
        results['depth'] = depth # B,F,N
        results['weights_sum'] = weights_sum # B,F,N
        results['image_wo_bg'] = image_wo_bg # B,F,N,3
        # ipdb.set_trace()
        return results

    @torch.no_grad()
    def update_extra_state(self, decay=0.95, S=128):
        # call before each epoch to update extra states.
        if not (self.cuda_ray):
            return
        if self.taichi_ray:
            raise NotImplementedError

        ### update density grid
        tmp_grid = - torch.ones_like(self.density_grid)

        # full update.
        # if self.iter_density < 16: # update only 16 times
        if True: # full update
            X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
            Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
            Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)

            for t, time in enumerate(self.times):
                for xs in X:
                    for ys in Y:
                        for zs in Z:
                            # construct points
xx, yy, zz = custom_meshgrid(xs, ys, zs)
0
2023-11-23 10:34:08+00:00
24k
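Aside: the time bucketing used throughout `run_cuda` above (a continuous `time` in [0, 1] mapped to a discrete density-grid index) is just floor-and-clamp. A minimal sketch, with the function name chosen here for illustration:

    import torch

    # Map continuous times in [0, 1] to discrete density-grid indices, as in
    # run_cuda above: floor(time * time_size), clamped into [0, time_size - 1].
    def time_to_grid_index(time: torch.Tensor, time_size: int) -> torch.Tensor:
        return torch.floor(time * time_size).clamp(min=0, max=time_size - 1).long()

    t = torch.tensor([[0.0, 0.49, 0.99, 1.0]])   # B,F
    print(time_to_grid_index(t, 4))              # tensor([[0, 1, 3, 3]])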
alexzhou907/DreamPropeller
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
14,680
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )

        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        if self.cfg.shape_init is None and not self.cfg.force_shape_init:
            return

        # do not initialize shape if weights are provided
        if self.cfg.weights is not None and not self.cfg.force_shape_init:
            return

        get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
        assert isinstance(self.cfg.shape_init, str)
        if self.cfg.shape_init == "ellipsoid":
            assert (
                isinstance(self.cfg.shape_init_params, Sized)
                and len(self.cfg.shape_init_params) == 3
            )
            size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return ((points_rand / size) ** 2).sum(
                    dim=-1, keepdim=True
                ).sqrt() - 1.0  # pseudo signed distance of an ellipsoid

            get_gt_sdf = func

        elif self.cfg.shape_init == "sphere":
            assert isinstance(self.cfg.shape_init_params, float)
            radius = self.cfg.shape_init_params

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius

            get_gt_sdf = func

        elif self.cfg.shape_init.startswith("mesh:"):
            assert isinstance(self.cfg.shape_init_params, float)
            mesh_path = self.cfg.shape_init[5:]
            if not os.path.exists(mesh_path):
                raise ValueError(f"Mesh file {mesh_path} does not exist.")

            mesh = trimesh.load(mesh_path)

            # move to center
            centroid = mesh.vertices.mean(0)
            mesh.vertices = mesh.vertices - centroid

            # align to up-z and front-x
            dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
            dir2vec = {
                "+x": np.array([1, 0, 0]),
                "+y": np.array([0, 1, 0]),
                "+z": np.array([0, 0, 1]),
                "-x": np.array([-1, 0, 0]),
                "-y": np.array([0, -1, 0]),
                "-z": np.array([0, 0, -1]),
            }
            if (
                self.cfg.shape_init_mesh_up not in dirs
                or self.cfg.shape_init_mesh_front not in dirs
            ):
                raise ValueError(
                    f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
                )
            if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
                raise ValueError(
                    "shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
                )
            z_, x_ = (
                dir2vec[self.cfg.shape_init_mesh_up],
                dir2vec[self.cfg.shape_init_mesh_front],
            )
            y_ = np.cross(z_, x_)
            std2mesh = np.stack([x_, y_, z_], axis=0).T
            mesh2std = np.linalg.inv(std2mesh)

            # scaling
            scale = np.abs(mesh.vertices).max()
            mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
            mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T

            sdf = SDF(mesh.vertices, mesh.faces)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                # add a negative signed here
                # as in pysdf the inside of the shape has positive signed distance
                return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
                    points_rand
                )[..., None]

            get_gt_sdf = func

        else:
            raise ValueError(
                f"Unknown shape initialization type: {self.cfg.shape_init}"
            )

        sdf_gt = get_gt_sdf(
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        shape_init_mesh_up: str = "+z"
        shape_init_mesh_front: str = "+x"
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self, *args, **kwargs) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )

        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        if self.cfg.shape_init is None and not self.cfg.force_shape_init:
            return

        # do not initialize shape if weights are provided
        if self.cfg.weights is not None and not self.cfg.force_shape_init:
            return

        get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
        assert isinstance(self.cfg.shape_init, str)
        if self.cfg.shape_init == "ellipsoid":
            assert (
                isinstance(self.cfg.shape_init_params, Sized)
                and len(self.cfg.shape_init_params) == 3
            )
            size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return ((points_rand / size) ** 2).sum(
                    dim=-1, keepdim=True
                ).sqrt() - 1.0  # pseudo signed distance of an ellipsoid

            get_gt_sdf = func

        elif self.cfg.shape_init == "sphere":
            assert isinstance(self.cfg.shape_init_params, float)
            radius = self.cfg.shape_init_params

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius

            get_gt_sdf = func

        elif self.cfg.shape_init.startswith("mesh:"):
            assert isinstance(self.cfg.shape_init_params, float)
            mesh_path = self.cfg.shape_init[5:]
            if not os.path.exists(mesh_path):
                raise ValueError(f"Mesh file {mesh_path} does not exist.")

            mesh = trimesh.load(mesh_path)

            # move to center
            centroid = mesh.vertices.mean(0)
            mesh.vertices = mesh.vertices - centroid

            # align to up-z and front-x
            dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
            dir2vec = {
                "+x": np.array([1, 0, 0]),
                "+y": np.array([0, 1, 0]),
                "+z": np.array([0, 0, 1]),
                "-x": np.array([-1, 0, 0]),
                "-y": np.array([0, -1, 0]),
                "-z": np.array([0, 0, -1]),
            }
            if (
                self.cfg.shape_init_mesh_up not in dirs
                or self.cfg.shape_init_mesh_front not in dirs
            ):
                raise ValueError(
                    f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
                )
            if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
                raise ValueError(
                    "shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
                )
            z_, x_ = (
                dir2vec[self.cfg.shape_init_mesh_up],
                dir2vec[self.cfg.shape_init_mesh_front],
            )
            y_ = np.cross(z_, x_)
            std2mesh = np.stack([x_, y_, z_], axis=0).T
            mesh2std = np.linalg.inv(std2mesh)

            # scaling
            scale = np.abs(mesh.vertices).max()
            mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
            mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T

            sdf = SDF(mesh.vertices, mesh.faces)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                # add a negative signed here
                # as in pysdf the inside of the shape has positive signed distance
                return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
                    points_rand
                )[..., None]

            get_gt_sdf = func

        else:
            raise ValueError(
                f"Unknown shape initialization type: {self.cfg.shape_init}"
            )

        sdf_gt = get_gt_sdf(
scale_tensor(
10
2023-11-27 23:39:49+00:00
24k
abdulhaim/LMRL-Gym
llm_rl_scripts/chess/ilql/train_full_games_ilql.py
[ { "identifier": "ilql_loss", "path": "LLM_RL/algorithms/ilql/base_interface.py", "snippet": "def ilql_loss(\n q1: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q2: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n v: jax.Array, # [batch, time-1] output is masked; shi...
from typing import Optional
from JaxSeq.bucket_manager import open_with_bucket as open
from JaxSeq.utils import convert_path, load_mesh, setup_experiment_save
from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask
from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode
from LLM_RL.algorithms.ilql.base_interface import ilql_loss
from transformers.generation import GenerationConfig
from jaxtyping import PyTree
from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain
from LLM_RL.algorithms.ilql.gpt2.interface import GPT2ILQLTrain, GPT2ILQLInference
from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy, GPT2ValueRLInference
from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config
from LLM_RL.heads.mlp_head import MLPHeadConfig
from JaxSeq.shard_model import shard_params_from_params
from LLM_RL.algorithms.ilql.data import ILQLDataset
from LLM_RL.algorithms.ilql.data import ILQLIterableDataset
from functools import partial
from JaxSeq.logs import pull_logs
from LLM_RL.algorithms.ilql.train import eval_loss, train_loop
from LLM_RL.algorithms.ilql.data import ILQLData, ILQLDataset
from JaxSeq.utils import multihost_device_get
from transformers import GPT2TokenizerFast
from IPython import embed
from llm_rl_scripts.chess.env.env import FenChessHistoryEnv
import tyro
import jax
import jax.numpy as jnp
import os
import optax
import pickle as pkl
import re
import json
import random
21,116
def main(
    model_load_mode: ModelLoadMode,
    model_load_path: str,
    train_data_path: str,
    /,  # Mark the end of positional arguments.
    exp_name: Optional[str]=None,
    outputs_path: Optional[str]=None,
    data_mesh_shape: int=1,
    fsdp_mesh_shape: int=1,
    model_mesh_shape: int=-1,
    use_wandb: bool=True,
    wandb_project: Optional[str]="llm_rl_repo_endgames_ilql",
    n_rounds: int=1,
    epochs: int=1,
    max_steps: Optional[int]=None,
    lr: float=1e-4,
    weight_decay: float=0.0,
    tau: float=0.7,
    cql_weight: float=1.0,
    gamma: float=0.99,
    train_bsize: int=32,
    grad_accum_steps: int=1,
    gradient_checkpointing: bool=False,
    gradient_checkpointing_policy: str='nothing_saveable',
    max_length: int=160,
    log_every: int=256,
    eval_every_steps: Optional[int]=None,
    eval_every_epochs: Optional[int]=10,
    eval_at_beginning: bool=False,
    eval_at_end: bool=True,
    save_every_steps: Optional[int]=None,
    save_every_epochs: Optional[int]=5,
    save_at_beginning: bool=False,
    save_at_end: bool=True,
    save_best: bool=False,
    max_checkpoints: Optional[int]=5,
    save_train_state: bool=True,
    save_bf16: bool=True,
    policy_max_input_length: int=256,
    policy_max_output_length: int=256,
    policy_do_sample: bool=True,
    policy_num_beams: int=1,
    policy_temperature: Optional[float]=None,
    policy_top_p: Optional[float]=None,
    policy_top_k: Optional[int]=None,
    force_pad_embeddings: bool=False,
    should_restore_loop_state: bool=False,
    reranker: bool=True,
):
    input_args = locals()
    print(input_args)

    tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
    tokenizer.add_special_tokens({'pad_token': '<|pad|>'})

    mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp'))
    is_main_process = jax.process_index() == 0
    print(f"Mesh: {mesh}")
    print(f"Is main process: {is_main_process}")

    def ilql_data_generator(data_name):
        with open(data_name, "r") as f:
            for item in f:
                obj = json.loads(item)
                # curr_chain = TextTrajectory()

                # starting with the last element
                last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)],
                                                 [0, obj[-1]["reward"]], True)
                curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None)
                # curr_chain.next = curr_chain

                for traj in reversed(obj):  # iterate through move history backwards except for last transition
                    # embed()
                    prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)],
                                                     [0, traj["reward"]], False)
                    curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain)
def main(
    model_load_mode: ModelLoadMode,
    model_load_path: str,
    train_data_path: str,
    /,  # Mark the end of positional arguments.
    exp_name: Optional[str]=None,
    outputs_path: Optional[str]=None,
    data_mesh_shape: int=1,
    fsdp_mesh_shape: int=1,
    model_mesh_shape: int=-1,
    use_wandb: bool=True,
    wandb_project: Optional[str]="llm_rl_repo_endgames_ilql",
    n_rounds: int=1,
    epochs: int=1,
    max_steps: Optional[int]=None,
    lr: float=1e-4,
    weight_decay: float=0.0,
    tau: float=0.7,
    cql_weight: float=1.0,
    gamma: float=0.99,
    train_bsize: int=32,
    grad_accum_steps: int=1,
    gradient_checkpointing: bool=False,
    gradient_checkpointing_policy: str='nothing_saveable',
    max_length: int=160,
    log_every: int=256,
    eval_every_steps: Optional[int]=None,
    eval_every_epochs: Optional[int]=10,
    eval_at_beginning: bool=False,
    eval_at_end: bool=True,
    save_every_steps: Optional[int]=None,
    save_every_epochs: Optional[int]=5,
    save_at_beginning: bool=False,
    save_at_end: bool=True,
    save_best: bool=False,
    max_checkpoints: Optional[int]=5,
    save_train_state: bool=True,
    save_bf16: bool=True,
    policy_max_input_length: int=256,
    policy_max_output_length: int=256,
    policy_do_sample: bool=True,
    policy_num_beams: int=1,
    policy_temperature: Optional[float]=None,
    policy_top_p: Optional[float]=None,
    policy_top_k: Optional[int]=None,
    force_pad_embeddings: bool=False,
    should_restore_loop_state: bool=False,
    reranker: bool=True,
):
    input_args = locals()
    print(input_args)

    tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
    tokenizer.add_special_tokens({'pad_token': '<|pad|>'})

    mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp'))
    is_main_process = jax.process_index() == 0
    print(f"Mesh: {mesh}")
    print(f"Is main process: {is_main_process}")

    def ilql_data_generator(data_name):
        with open(data_name, "r") as f:
            for item in f:
                obj = json.loads(item)
                # curr_chain = TextTrajectory()

                # starting with the last element
                last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)],
                                                 [0, obj[-1]["reward"]], True)
                curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None)
                # curr_chain.next = curr_chain

                for traj in reversed(obj):  # iterate through move history backwards except for last transition
                    # embed()
                    prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)],
                                                     [0, traj["reward"]], False)
                    curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain)
token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(curr_chain, tokenizer)
5
2023-11-21 00:16:42+00:00
24k
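Aside: the backward chain construction in `ilql_data_generator` above can be summarized with plain stand-in classes (`Step` and `Chain` are hypothetical, not the LLM_RL types). This sketch follows the stated intent of the record's loop comment and skips the terminal transition when walking backwards, which the record's loop as written does not actually do:

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class Step:                  # hypothetical stand-in for TextTrajectory
        state: str
        action: str
        reward: float
        done: bool

    @dataclass
    class Chain:                 # hypothetical stand-in for TextTrajectoryChain
        step: Step
        next: Optional["Chain"]

    def build_chain(transitions: List[dict]) -> Chain:
        # Start from the terminal transition, then link earlier ones in front of it.
        last = transitions[-1]
        chain = Chain(Step(last["state"], last["action"], last["reward"], True), next=None)
        for t in reversed(transitions[:-1]):   # walk earlier transitions back to the start
            chain = Chain(Step(t["state"], t["action"], t["reward"], False), next=chain)
        return chain

    game = [{"state": "s0", "action": "a0", "reward": 0.0},
            {"state": "s1", "action": "a1", "reward": 1.0}]
    head = build_chain(game)
    assert head.step.state == "s0" and head.next.step.done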