Dataset schema (field, type, observed values):

  repo_name           string   lengths 7-71
  file_path           string   lengths 5-118
  context             list
  import_statement    string   lengths 45-12.5k
  token_num           int64    641-99.4k
  cropped_code        string   lengths 44-17k
  all_code            string   lengths 43-754k
  next_line           string   lengths 2-330
  gold_snippet_index  int64    0-68
  created_at          string   length 25
  level               string   9 classes
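The rows below are repository-level next-line completion examples: cropped_code is the visible file prefix ending at the prediction point, next_line is the target line, context holds candidate cross-file snippets, and gold_snippet_index presumably marks the snippet needed for the completion. A minimal sketch of consuming rows with this schema via the Hugging Face datasets library; the Hub path is a placeholder, not the dataset's real identifier:

```python
# Sketch only: "someorg/repo-completion" is a hypothetical dataset path.
from datasets import load_dataset

ds = load_dataset("someorg/repo-completion", split="train")
for row in ds.select(range(3)):
    print(row["repo_name"], row["file_path"], row["level"])
    # cropped_code ends where the model must predict next_line
    print("gold snippet index:", row["gold_snippet_index"])
    print("target:", row["next_line"])
```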
repo_name: JoaoPedro9674/django-ledger
file_path: django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES...
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
token_num: 15,656
# print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. ' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only:
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only:
next_line: role = roles_module.GROUP_EARNINGS
gold_snippet_index: 2
created_at: 2023-10-20 01:07:20+00:00
level: 24k
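The all_code field of the record above carries django-ledger's diff_tx_data helper, which enforces double-entry balance: summed credits must equal summed debits, within settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE. A standalone, simplified re-creation of that check, with a made-up tolerance in place of the library's settings:

```python
from decimal import Decimal

# Stand-in for settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE.
MAX_TOLERANCE = Decimal("0.0001")

def diff_tx_data(tx_data):
    # A journal entry is in balance when credits and debits match.
    credits = sum(tx["amount"] for tx in tx_data if tx["tx_type"] == "credit")
    debits = sum(tx["amount"] for tx in tx_data if tx["tx_type"] == "debit")
    diff = credits - debits
    return abs(diff) <= MAX_TOLERANCE, diff

tx_data = [
    {"tx_type": "credit", "amount": Decimal("100.00")},
    {"tx_type": "debit", "amount": Decimal("60.00")},
    {"tx_type": "debit", "amount": Decimal("40.00")},
]
print(diff_tx_data(tx_data))  # (True, Decimal('0.00'))
```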
repo_name: acolas1/KGSimple
file_path: simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.ba...
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
token_num: 21,497
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args) model = GAP_Type_model.from_pretrained(checkpoint,t_emb_dim=t_emb_dim) else: model = GAP_model.from_pretrained(checkpoint) if torch.cuda.is_available(): model.to(torch.device("cuda")) # Here let's put all the scorers and make a "score" function for each. scores = [{"name": "fluency", "model": FluencyScorer(1, log=True, laplace_smooth=True, prob_dict_path="data/wiki/enwiki/enwiki_terms_with_punc.csv"), "sign": 1, "weight": 1.0}, {"name": "simple_text_score", "model": SimplicityTextScore(), "sign": 1, "weight": 1.0},
all_code: identical to the cropped_code field above.
next_line: {"name": "saliency_bert", "model": SaliencyBERTScore(), "sign": 1, "weight": 1.0},
gold_snippet_index: 1
created_at: 2023-10-24 13:24:23+00:00
level: 24k
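The cropped_code of the record above builds a list of scorer dicts (name, model, sign, weight) that a ScorerWrapper later aggregates. A toy re-creation of that aggregation pattern; the lambda scorers here are stand-ins for the repo's model-backed FluencyScorer and SimplicityTextScore classes:

```python
# Stand-in scorers; the real ones are model-backed classes.
scorers = [
    {"name": "fluency", "score": lambda s: 1.0 / (1.0 + s.count(",")), "sign": 1, "weight": 1.0},
    {"name": "brevity", "score": lambda s: 1.0 / max(len(s.split()), 1), "sign": 1, "weight": 0.5},
]

def aggregate_score(text):
    # Each scorer contributes sign * weight * score, mirroring the dict layout.
    return sum(sc["sign"] * sc["weight"] * sc["score"](text) for sc in scorers)

print(aggregate_score("A short, simple sentence."))  # 0.625
```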
repo_name: ForceFledgling/proxyhub
file_path: proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=...
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
token_num: 14,844
(optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. 
Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, )
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). 
The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, )
next_line: except (ResolveError, ValueError):
gold_snippet_index: 1
created_at: 2023-11-05 13:28:57+00:00
level: 24k
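The docstrings in the record above document proxyhub's Broker API: find() gathers and checks proxies, serve() runs a local rotating-proxy server on top of the pool. A minimal usage sketch assembled from those docstrings (proxyhub descends from ProxyBroker, so this mirrors that project's canonical example; treat it as an illustration, not tested proxyhub code):

```python
import asyncio
from proxyhub import Broker  # assumes Broker is exported at package root

async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:  # the broker signals completion with None
            break
        print("Found proxy:", proxy)

proxies = asyncio.Queue()
broker = Broker(proxies)

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(
    broker.find(types=["HTTP", "HTTPS"], limit=10),  # types is required
    show(proxies),
))
```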
repo_name: radekd91/inferno
file_path: inferno/models/DECA.py
[ { "identifier": "EmoNetLoss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "class EmoNetLoss(EmoLossBase):\n# class EmoNetLoss(object):\n\n def __init__(self, device, emonet=None, trainable=False, normalize_features=False, emo_feat_loss=None, au_loss=None):\n if emonet is None:\n ...
import os, sys import torch import torchvision import torch.nn.functional as F import torchvision.transforms.functional as F_v import numpy as np import cv2 import inferno.layers.losses.DecaLosses as lossfunc import inferno.layers.losses.MediaPipeLandmarkLosses as lossfunc_mp import inferno.utils.DecaUtils as util import pytorch_lightning.plugins.environments.lightning_environment as le import psutil import adabound import copy from pytorch_lightning import LightningModule from pytorch_lightning.loggers import WandbLogger from inferno.layers.losses.EmoNetLoss import EmoNetLoss, create_emo_loss, create_au_loss from skimage.io import imread from skimage.transform import resize from pathlib import Path from inferno.models.Renderer import SRenderY from inferno.models.DecaEncoder import ResnetEncoder, SecondHeadResnet, SwinEncoder from inferno.models.DecaDecoder import Generator, GeneratorAdaIn from inferno.models.DecaFLAME import FLAME, FLAMETex, FLAME_mediapipe from inferno.models.EmotionMLP import EmotionMLP from inferno.datasets.AffWild2Dataset import Expression7 from inferno.datasets.AffectNetDataModule import AffectNetExpressions from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np from enum import Enum from inferno.utils.other import class_from_str, get_path_to_assets from inferno.layers.losses.VGGLoss import VGG19Loss from omegaconf import OmegaConf, open_dict from inferno.models.temporal.external.LipReadingLoss import LipReadingLoss from .StarGAN import StarGANWrapper from inferno.models.EmoNetRegressor import EmoNetRegressor, EmonetRegressorStatic from .mica.config import get_cfg_defaults from .mica.mica import MICA from .mica.MicaInputProcessing import MicaInputProcessor from inferno.utils.other import get_path_to_assets from inferno.models.IO import locate_checkpoint
token_num: 18,195
""" super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MPL regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable. It will be replaced.") if 'emonet_model_path' in self.deca.config.keys(): emonet_model_path = self.deca.config.emonet_model_path else: emonet_model_path=None # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path) emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None old_emonet_loss = self.emonet_loss
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emoca@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de Parts of the code were adapted from the original DECA release: https://github.com/YadiraF/DECA/ """ # from time import time torch.backends.cudnn.benchmark = True class DecaMode(Enum): COARSE = 1 # when switched on, only coarse part of DECA-based networks is used DETAIL = 2 # when switched on, only coarse and detail part of DECA-based networks is used class DecaModule(LightningModule): """ DecaModule is a PL module that implements DECA-inspired face reconstruction networks. """ def __init__(self, model_params, learning_params, inout_params, stage_name = ""): """ :param model_params: a DictConfig of parameters about the model itself :param learning_params: a DictConfig of parameters corresponding to the learning process (such as optimizer, lr and others) :param inout_params: a DictConfig of parameters about input and output (where checkpoints and visualizations are saved) """ super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MPL regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable. It will be replaced.") if 'emonet_model_path' in self.deca.config.keys(): emonet_model_path = self.deca.config.emonet_model_path else: emonet_model_path=None # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path) emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None old_emonet_loss = self.emonet_loss
next_line: self.emonet_loss = create_emo_loss(self.device, emoloss=emonet_model_path, trainable=emoloss_trainable,
gold_snippet_index: 1
created_at: 2023-11-07 20:13:32+00:00
level: 24k
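Both code fields in the record above resolve the network class from config with class_from_str(model_params.deca_class, sys.modules[__name__]), defaulting to vanilla DECA when no class is named. A self-contained sketch of that resolve-by-name pattern; the helper and classes below are simplified stand-ins, not inferno's actual implementations:

```python
import sys

def class_from_str(name, module):
    # Look a class up in `module` by its string name.
    cls = getattr(module, name, None)
    if cls is None:
        raise ValueError(f"{name} not found in {module.__name__}")
    return cls

class DECA:             # stand-in for the vanilla network
    pass

class ExpDECA(DECA):    # stand-in for the EMOCA-style variant
    pass

deca_class_name = "ExpDECA"  # e.g. model_params.deca_class from the config
deca_class = (class_from_str(deca_class_name, sys.modules[__name__])
              if deca_class_name else DECA)
print(deca_class.__name__)  # ExpDECA
```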
repo_name: codefuse-ai/Collinear-Constrained-Attention
file_path: model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, p...
import os import torch import sys import peft import model.peft.modeling_peft # noqa import bitsandbytes as bnb # noqa import accelerate # noqa from utils.common_utils import get_model_params_num from transformers import ( # noqa: E402 CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast ) from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from .llama.configuration_llama import LlamaConfig from .llama.modeling_llama import LlamaForCausalLM from .llama.tokenization_llama import LlamaTokenizer from .llama.tokenization_llama_fast import LlamaTokenizerFast from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, StateDictType, ) from utils.common_utils import print_rank_0, is_old_version from tokenizer import build_tokenizer from tokenizer.tokenizer import HFTokenizer from peft.tuners.lora import LoraLayer from model.peft.utils import prepare_model_for_kbit_training from peft import ( # noqa LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model ) from model.peft.tuner import AdaLoraConfig from transformers import BitsAndBytesConfig from packaging import version from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
token_num: 17,448
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama':
all_code: identical to the cropped_code field above.
auto_config = LlamaConfig
4
2023-11-02 01:37:01+00:00
24k
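A side note on the record above: its `find_all_linear_names` helper walks `model.named_modules()` to collect the name of every (possibly quantized) linear layer, dropping `lm_head` so it can stay in full precision. A minimal sketch of how such a name list is typically consumed for LoRA fine-tuning follows; the `peft` usage and all hyperparameters here are illustrative assumptions, not code from this record.

```python
import torch
from peft import LoraConfig, get_peft_model

def linear_module_names(model):
    # Mirrors find_all_linear_names from the record (full-precision case only):
    # keep the last dotted component of each torch.nn.Linear module name.
    names = set()
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            names.add(name.split('.')[-1])
    names.discard('lm_head')  # excluded from LoRA, as in the record
    return list(names)

def attach_lora(model):
    # Hypothetical downstream usage; r/alpha/dropout are illustrative values.
    cfg = LoraConfig(
        r=8,
        lora_alpha=16,
        target_modules=linear_module_names(model),
        lora_dropout=0.05,
        task_type="CAUSAL_LM",
    )
    return get_peft_model(model, cfg)
```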
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgri...
import os.path as osp
import warnings
import collections
import einops
import numpy as np
import biotite.structure as struc
import torch
import lightning.pytorch as pl
from pathlib import Path
from copy import deepcopy
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchinfo import summary
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.utilities import rank_zero_only
from lightning.pytorch.strategies import DDPStrategy
from mmengine import mkdir_or_exist
from cryostar.utils.transforms import SpatialGridTranslate
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.losses import calc_cor_loss, calc_kl_loss
from cryostar.utils.misc import log_to_current, \
    pl_init_exp, pretty_dict, set_seed, warmup
from cryostar.utils.pdb_tools import bt_save_pdb
from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian
from cryostar.gmm.deformer import E3Deformer, NMADeformer
from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d
from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS
from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs, find_continuous_pairs,
                                      calc_dist_by_pair_indices, remove_duplicate_pairs, filter_same_chain_pairs,
                                      DistLoss)
from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap
from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image
from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \
    filter_outputs_by_indices, get_1st_unique_indices
from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
16,674
tmp_atom_arr.coord = tmp_struc atom_arrs.append(tmp_atom_arr) bt_save_pdb(save_path, struc.stack(atom_arrs)) def _shared_image_check(self, total=25): mode = self.model.training # use validation or test set which not shuffled tmp_loader = self.trainer.val_dataloaders or self.trainer.test_dataloaders num = 0 gt_images_list = [] pred_gmm_images_list = [] self.model.eval() with torch.no_grad(): for batch in tmp_loader: batch = self.trainer.strategy.batch_to_device(batch) gt_images, pred_gmm_images, _, mu, log_var = self._shared_infer(batch) gt_images_list.append(gt_images) pred_gmm_images_list.append(pred_gmm_images) num += gt_images.shape[0] if num >= total: break self.model.train(mode=mode) gt_images_list = torch.cat(gt_images_list, dim=0)[:total] pred_gmm_images_list = torch.cat(pred_gmm_images_list, dim=0)[:total] save_dir = self._get_save_dir() save_tensor_image(gt_images_list, osp.join(save_dir, "input_image.png")) save_tensor_image(pred_gmm_images_list, osp.join(save_dir, "pred_gmm_image.png"), self.mask.mask) # standard hooks: def training_step(self, batch, batch_idx): cfg = self.cfg gt_images, pred_gmm_images, pred_struc, mu, log_var = self._shared_infer(batch) # gmm part loss # only gmm supervision should be low-passed if self.lp_mask2d is not None: lp_gt_images = self.low_pass_images(gt_images) else: lp_gt_images = gt_images gmm_proj_loss = calc_cor_loss(pred_gmm_images, lp_gt_images, self.mask) weighted_gmm_proj_loss = cfg.loss.gmm_cryoem_weight * gmm_proj_loss if hasattr(self, "connect_pairs"): connect_loss = calc_pair_dist_loss(pred_struc, self.connect_pairs, self.connect_dists) weighted_connect_loss = cfg.loss.connect_weight * connect_loss else: weighted_connect_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "sse_pairs"): sse_loss = calc_pair_dist_loss(pred_struc, self.sse_pairs, self.sse_dists) weighted_sse_loss = cfg.loss.connect_weight * sse_loss else: weighted_sse_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "dist_loss_fn"): dist_loss = self.dist_loss_fn(pred_struc) # across devices all_dist_loss = self.all_gather(dist_loss) # world_size, batch, num_pairs all_dist_loss = all_dist_loss.reshape(-1, dist_loss.shape[-1]) # chain-wise drop with torch.no_grad(): keep_mask = torch.ones(dist_loss.shape[-1], dtype=torch.bool).to(dist_loss.device) for i in range(len(self.cutoff_chain_mask)): tmp_mask = self.cutoff_chain_mask[i] tmp_var = all_dist_loss.index_select(dim=1, index=tmp_mask.nonzero(as_tuple=True)[0]).var(dim=0) intra_chain_keep_mask = tmp_var.lt(torch.quantile(tmp_var, cfg.loss.dist_keep_ratio)) keep_mask[tmp_mask] *= intra_chain_keep_mask keep_mask = keep_mask.unsqueeze(0).repeat(dist_loss.size(0), 1) dist_loss = torch.mean(dist_loss[keep_mask]) weighted_dist_loss = cfg.loss.dist_weight * dist_loss # dist_penalty = torch.mean(torch.abs(self.dist_loss_fn.get_weights())) # weighted_dist_penalty = cfg.loss.dist_penalty_weight * dist_penalty else: weighted_dist_loss = weighted_gmm_proj_loss.new_tensor(0.) # weighted_dist_penalty = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "clash_pairs"): clash_loss = calc_clash_loss(pred_struc, self.clash_pairs, cfg.loss.clash_min_cutoff) weighted_clash_loss = cfg.loss.clash_weight * clash_loss else: weighted_clash_loss = weighted_gmm_proj_loss.new_tensor(0.) 
# KL kl_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits) kl_beta = warmup(cfg.loss.warmup_step, upper=cfg.loss.kl_beta_upper)(self.global_step) weighted_kld_loss = kl_beta * kl_loss / self.mask.num_masked # clac loss loss = (weighted_kld_loss + weighted_gmm_proj_loss + weighted_connect_loss + weighted_dist_loss + weighted_sse_loss + weighted_clash_loss) tmp_metric = { "loss": loss.item(), "cryoem(gmm)": weighted_gmm_proj_loss.item(), "con": weighted_connect_loss.item(), "sse": weighted_sse_loss.item(), "dist": weighted_dist_loss.item(), # "dist_penalty": weighted_dist_penalty.item(), "clash": weighted_clash_loss.item(), "kld": weighted_kld_loss.item(), "kld(/dim)": kl_loss.item() } if self.global_step % cfg.runner.log_every_n_step == 0: self.log_dict(tmp_metric) log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " +
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None: self.deformer = E3Deformer() else: self.deformer = NMADeformer(nma_modes) # loss or regularization's preparation # dist loss connect_pairs = find_continuous_pairs(meta.chain_id, meta.res_id, meta.atom_name) if cfg.extra_input_data_attr.use_domain: log_to_current("use domain instead of chain!") domain_id = np.load(cfg.extra_input_data_attr.domain_path) cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, domain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) else: # deal with RNA/DNA if np.sum(np.isin(meta.atom_name, NT_ATOMS)): # aa tmp_mask = np.isin(meta.atom_name, AA_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] aa_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) aa_cutoff_pairs = indices_in_pdb[aa_cutoff_pairs] log_to_current(f"{len(aa_cutoff_pairs)} AA pairs") # nt tmp_mask = np.isin(meta.atom_name, NT_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] nt_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.nt_intra_chain_cutoff, cfg.loss.nt_inter_chain_cutoff, cfg.loss.nt_intra_chain_res_bound) nt_cutoff_pairs = indices_in_pdb[nt_cutoff_pairs] log_to_current(f"{len(nt_cutoff_pairs)} NT pairs") cutoff_pairs = np.vstack((aa_cutoff_pairs, nt_cutoff_pairs)) else: cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, connect_pairs) if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", 
torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d images = fourier_to_primal_2d(f_images).real return images def get_batch_pose(self, batch): rot_mats = batch["rotmat"] # yx order trans_mats = torch.concat((batch["shiftY"].unsqueeze(1), 
batch["shiftX"].unsqueeze(1)), dim=1) trans_mats /= self.apix return rot_mats, trans_mats def _shared_forward(self, images, idxes, rots): # predict structure pred_deformation, mu, log_var = self.model(prepare_images(images, self.cfg.model.input_space), idxes, rots) return pred_deformation, mu, log_var def _shared_projection(self, pred_struc, rot_mats): pred_images = batch_projection( gauss=Gaussian( mus=pred_struc, sigmas=self.gmm_sigmas.unsqueeze(0), # (b, num_centers) amplitudes=self.gmm_amps.unsqueeze(0)), rot_mats=rot_mats, line_grid=self.grid.line()) pred_images = einops.rearrange(pred_images, 'b y x -> b 1 y x') return pred_images def _apply_ctf(self, batch, real_proj, freq_mask=None): f_proj = primal_to_fourier_2d(real_proj) f_proj = self._apply_ctf_f(batch, f_proj, freq_mask) # Note: here only use the real part proj = fourier_to_primal_2d(f_proj).real return proj def _apply_ctf_f(self, batch, f_proj, freq_mask=None): pred_ctf_params = {k: batch[k] for k in ('defocusU', 'defocusV', 'angleAstigmatism') if k in batch} f_proj = self.ctf(f_proj, batch['idx'], ctf_params=pred_ctf_params, mode="gt", frequency_marcher=None) if freq_mask is not None: f_proj = f_proj * self.lp_mask2d return f_proj def _shared_infer(self, batch): gt_images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.get_batch_pose(batch) # if self.lp_mask2d is not None: # gt_images = self.low_pass_images(gt_images) # prediction pred_deformation, mu, log_var = self._shared_forward(gt_images, idxes, rot_mats) pred_struc = self.deformer.transform(pred_deformation, self.gmm_centers) # get gmm projections pred_gmm_images = self._shared_projection(pred_struc, rot_mats) # apply ctf, low-pass pred_gmm_images = self._apply_ctf(batch, pred_gmm_images, self.lp_mask2d) if trans_mats is not None: gt_images = self.translator.transform(einops.rearrange(gt_images, "B 1 NY NX -> B NY NX"), einops.rearrange(trans_mats, "B C2 -> B 1 C2")) return gt_images, pred_gmm_images, pred_struc, mu, log_var def _shared_decoding(self, z): with torch.no_grad(): z = z.float().to(self.device) pred_deformation = self.model.decoder(z) pred_struc = self.deformer.transform(pred_deformation, self.gmm_centers) pred_struc = pred_struc.squeeze(0) return pred_struc def _save_batched_strucs(self, pred_strucs, save_path): ref_atom_arr = self.template_pdb.copy() atom_arrs = [] b = pred_strucs.shape[0] for i in range(b): tmp_struc = pred_strucs[i].cpu().numpy() tmp_atom_arr = ref_atom_arr.copy() tmp_atom_arr.coord = tmp_struc atom_arrs.append(tmp_atom_arr) bt_save_pdb(save_path, struc.stack(atom_arrs)) def _shared_image_check(self, total=25): mode = self.model.training # use validation or test set which not shuffled tmp_loader = self.trainer.val_dataloaders or self.trainer.test_dataloaders num = 0 gt_images_list = [] pred_gmm_images_list = [] self.model.eval() with torch.no_grad(): for batch in tmp_loader: batch = self.trainer.strategy.batch_to_device(batch) gt_images, pred_gmm_images, _, mu, log_var = self._shared_infer(batch) gt_images_list.append(gt_images) pred_gmm_images_list.append(pred_gmm_images) num += gt_images.shape[0] if num >= total: break self.model.train(mode=mode) gt_images_list = torch.cat(gt_images_list, dim=0)[:total] pred_gmm_images_list = torch.cat(pred_gmm_images_list, dim=0)[:total] save_dir = self._get_save_dir() save_tensor_image(gt_images_list, osp.join(save_dir, "input_image.png")) save_tensor_image(pred_gmm_images_list, osp.join(save_dir, "pred_gmm_image.png"), self.mask.mask) # standard hooks: def training_step(self, 
batch, batch_idx): cfg = self.cfg gt_images, pred_gmm_images, pred_struc, mu, log_var = self._shared_infer(batch) # gmm part loss # only gmm supervision should be low-passed if self.lp_mask2d is not None: lp_gt_images = self.low_pass_images(gt_images) else: lp_gt_images = gt_images gmm_proj_loss = calc_cor_loss(pred_gmm_images, lp_gt_images, self.mask) weighted_gmm_proj_loss = cfg.loss.gmm_cryoem_weight * gmm_proj_loss if hasattr(self, "connect_pairs"): connect_loss = calc_pair_dist_loss(pred_struc, self.connect_pairs, self.connect_dists) weighted_connect_loss = cfg.loss.connect_weight * connect_loss else: weighted_connect_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "sse_pairs"): sse_loss = calc_pair_dist_loss(pred_struc, self.sse_pairs, self.sse_dists) weighted_sse_loss = cfg.loss.connect_weight * sse_loss else: weighted_sse_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "dist_loss_fn"): dist_loss = self.dist_loss_fn(pred_struc) # across devices all_dist_loss = self.all_gather(dist_loss) # world_size, batch, num_pairs all_dist_loss = all_dist_loss.reshape(-1, dist_loss.shape[-1]) # chain-wise drop with torch.no_grad(): keep_mask = torch.ones(dist_loss.shape[-1], dtype=torch.bool).to(dist_loss.device) for i in range(len(self.cutoff_chain_mask)): tmp_mask = self.cutoff_chain_mask[i] tmp_var = all_dist_loss.index_select(dim=1, index=tmp_mask.nonzero(as_tuple=True)[0]).var(dim=0) intra_chain_keep_mask = tmp_var.lt(torch.quantile(tmp_var, cfg.loss.dist_keep_ratio)) keep_mask[tmp_mask] *= intra_chain_keep_mask keep_mask = keep_mask.unsqueeze(0).repeat(dist_loss.size(0), 1) dist_loss = torch.mean(dist_loss[keep_mask]) weighted_dist_loss = cfg.loss.dist_weight * dist_loss # dist_penalty = torch.mean(torch.abs(self.dist_loss_fn.get_weights())) # weighted_dist_penalty = cfg.loss.dist_penalty_weight * dist_penalty else: weighted_dist_loss = weighted_gmm_proj_loss.new_tensor(0.) # weighted_dist_penalty = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "clash_pairs"): clash_loss = calc_clash_loss(pred_struc, self.clash_pairs, cfg.loss.clash_min_cutoff) weighted_clash_loss = cfg.loss.clash_weight * clash_loss else: weighted_clash_loss = weighted_gmm_proj_loss.new_tensor(0.) # KL kl_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits) kl_beta = warmup(cfg.loss.warmup_step, upper=cfg.loss.kl_beta_upper)(self.global_step) weighted_kld_loss = kl_beta * kl_loss / self.mask.num_masked # clac loss loss = (weighted_kld_loss + weighted_gmm_proj_loss + weighted_connect_loss + weighted_dist_loss + weighted_sse_loss + weighted_clash_loss) tmp_metric = { "loss": loss.item(), "cryoem(gmm)": weighted_gmm_proj_loss.item(), "con": weighted_connect_loss.item(), "sse": weighted_sse_loss.item(), "dist": weighted_dist_loss.item(), # "dist_penalty": weighted_dist_penalty.item(), "clash": weighted_clash_loss.item(), "kld": weighted_kld_loss.item(), "kld(/dim)": kl_loss.item() } if self.global_step % cfg.runner.log_every_n_step == 0: self.log_dict(tmp_metric) log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " +
pretty_dict(tmp_metric, 5))
8
2023-11-06 07:15:26+00:00
24k
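The `training_step` in the record above anneals the KL weight via `warmup(cfg.loss.warmup_step, upper=cfg.loss.kl_beta_upper)(self.global_step)`, but the `warmup` helper itself is not shown. The sketch below is one plausible linear-ramp implementation matching that call shape; cryostar's actual helper may differ.

```python
def warmup(warmup_step: int, lower: float = 0.0, upper: float = 1.0):
    """Return f(global_step) that ramps linearly from `lower` to `upper`
    over `warmup_step` steps, then holds. A guess at the call shape used
    in the record, not the library's real implementation."""
    span = max(int(warmup_step), 1)

    def beta(global_step: int) -> float:
        frac = min(max(global_step, 0) / span, 1.0)
        return lower + (upper - lower) * frac

    return beta

# Usage matching the record (values illustrative):
# kl_beta = warmup(10_000, upper=0.5)(global_step)
```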
KAIST-AILab/palr
train.py
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n ...
import os import wandb import envs import d4rl import gym import torch from imitation.bc import BC from imitation.rap import RAP from imitation.fca import FCA from imitation.mine import MINE_BC from imitation.palr import PALR from argparse import ArgumentParser from itertools import product from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy from core.replay_buffer import EnvReplayBuffer from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions from rlkit.envs.wrappers import NormalizedBoxEnv
20,850
trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'FCA' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) trainer = FCA( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], wandb = wandb, save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, standardize=configs['standardize'], embedding_dim = embedding_dim, entropy_hidden_size = configs['additional_network_size'], entropy_lr = configs['inner_lr'], reg_coef = configs['reg_coef'], info_bottleneck_loss_coef = configs['info_bottleneck_loss_coef'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num'], inner_steps = configs['inner_steps'],) elif 'MINE' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) trainer = MINE_BC( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], wandb = wandb, save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, embedding_dim = embedding_dim, standardize=configs['standardize'], mine_lr = configs['inner_lr'], reg_coef = configs['reg_coef'], info_bottleneck_loss_coef = configs['info_bottleneck_loss_coef'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], inner_steps = configs['inner_steps'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'PALR' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, )
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs): env = NormalizedBoxEnv(gym.make(configs['envname'])) obs_dim = env.observation_space.low.size action_dim = env.action_space.low.size d4rl_env = gym.make(configs['d4rl_env_name']) stacksize = configs['stacksize'] if stacksize == 0: stacksize = 1 device = 'cuda' if torch.cuda.is_available() else 'cpu' envname, envtype = configs['envname'], configs['envtype'] traj_load_path = configs['traj_load_path'] print(f'-- Loading dataset from {traj_load_path}...') dataset = d4rl_env.get_dataset() print(f'-- Done!') print(f'-- Preprocessing dataset... ({envtype}, {stacksize})') path = preprocess_dataset_with_prev_actions(dataset, envtype, stacksize, configs['partially_observable'], action_history_len=2) train_data = data_select_num_transitions(path, configs['train_data_num']) valid_data = data_select_num_transitions(path, configs['valid_data_num'], start_idx=900000) replay_buffer = EnvReplayBuffer( configs['replay_buffer_size'], env, stacksize, action_history_len=2 ) replay_buffer.add_path(train_data) replay_buffer_valid = EnvReplayBuffer( configs['replay_buffer_size'], env, stacksize, action_history_len=2 ) replay_buffer_valid.add_path(valid_data) if configs['standardize']: obs_mean, obs_std, act_mean, act_std = replay_buffer.calculate_statistics() replay_buffer_valid.set_statistics(obs_mean, obs_std, act_mean, act_std) # to use wandb, initialize here, e.g. # wandb.init(project='palr', dir=wandb_dir, config=configs) wandb = None if 'BC' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) trainer = BC( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, envname = envname, lr = configs['lr'], save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration=configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'RAP' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) best_policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) trainer = RAP( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], save_policy_path = 
configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, embedding_dim = embedding_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'FCA' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) trainer = FCA( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], wandb = wandb, save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, standardize=configs['standardize'], embedding_dim = embedding_dim, entropy_hidden_size = configs['additional_network_size'], entropy_lr = configs['inner_lr'], reg_coef = configs['reg_coef'], info_bottleneck_loss_coef = configs['info_bottleneck_loss_coef'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num'], inner_steps = configs['inner_steps'],) elif 'MINE' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) trainer = MINE_BC( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], wandb = wandb, save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, embedding_dim = embedding_dim, standardize=configs['standardize'], mine_lr = configs['inner_lr'], reg_coef = configs['reg_coef'], info_bottleneck_loss_coef = configs['info_bottleneck_loss_coef'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], inner_steps = configs['inner_steps'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'PALR' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, )
trainer = PALR(
4
2023-11-06 08:35:34+00:00
24k
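The `train()` function in the record above repeats an almost identical policy/trainer construction in every `'BC' in configs['algorithm']`-style branch. As an illustrative refactor (not repo code), the branching could be condensed into a small registry; class names mirror the record's imports.

```python
from imitation.bc import BC
from imitation.rap import RAP
from imitation.fca import FCA
from imitation.mine import MINE_BC
from imitation.palr import PALR

# 'BC' is checked last so it does not shadow longer substring matches
# (dict insertion order is preserved in Python 3.7+).
TRAINERS = {'RAP': RAP, 'FCA': FCA, 'MINE': MINE_BC, 'PALR': PALR, 'BC': BC}

def select_trainer_cls(algorithm: str):
    """Pick a trainer class by substring match, as the original if/elif chain does."""
    for key, cls in TRAINERS.items():
        if key in algorithm:
            return cls
    raise ValueError(f"unknown algorithm: {algorithm}")
```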
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt
import os
import pickle
import sys
import time
import colorama
import pygame
from colorama import Fore, Style
from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \
    system_help
from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \
    markus_seen_call
from conversations.minigame_calls import code_shatter_call
from minigames.code_shatter_minigame import code_shatter_minigame
from minigames.eye_spy_minigame import port_scanning
from systems.level_1.amy.amy_system import AmySystem
from systems.level_1.billy.billy_system import BillySystem
from systems.level_1.cameras.camera_1 import camera_first
from systems.level_1.markus.markus_system import MarkusSystem
15,886
if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) first_call() if email['subject'].lower() == "upcoming software update" and email['sender'].lower() == 'markus': evidence_item = 6 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL)
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() else: pygame.mixer.music.stop() print_slow(Fore.RED + "\nBackground Music Disabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "delete savegame": # Delete savegame confirm = input(Fore.RED + "\nAre you sure you want to delete the savegame? (yes/no): " + Style.RESET_ALL) if confirm.lower() == "yes": try: os.remove("savegame.pkl") print_slow(Fore.GREEN + "\nSavegame Deleted" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() except FileNotFoundError: print_slow(Fore.RED + "\nSavegame not found" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "back" or choice.lower() == "back to main menu": # Return to Main Menu print_slow(Fore.GREEN + "\nReturning to Main Menu..." + Style.RESET_ALL) time.sleep(1) clear_terminal() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() # Function to add an item to the inventory def add_to_inventory(item): inventory.append(item) def remove_from_inventory(item): if item in inventory: inventory.remove(item) def add_evidence(evidence_item): evidence.append(evidence_item) def has_evidence(evidence_item): return evidence_item in evidence # Prints the games title def main(): clear_terminal() colorama.init() print_slow(Fore.GREEN + "██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗██╗░░██╗░█████╗░████████╗" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝██║░░██║██╔══██╗╚══██╔══╝" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░███████║███████║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░██╔══██║██╔══██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗██║░░██║██║░░██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░" + Style.RESET_ALL) # Pause for 2 seconds before clearing the console time.sleep(5) # Clear the console clear_terminal() # Main menu loop while True: print_slow(Fore.GREEN + "███╗░░░███╗░█████╗░██╗███╗░░██╗  ███╗░░░███╗███████╗███╗░░██╗██╗░░░██╗") print_slow(Fore.GREEN + "████╗░████║██╔══██╗██║████╗░██║  ████╗░████║██╔════╝████╗░██║██║░░░██║") print_slow(Fore.GREEN + "██╔████╔██║███████║██║██╔██╗██║  ██╔████╔██║█████╗░░██╔██╗██║██║░░░██║") print_slow(Fore.GREEN + "██║╚██╔╝██║██╔══██║██║██║╚████║  ██║╚██╔╝██║██╔══╝░░██║╚████║██║░░░██║") print_slow(Fore.GREEN + "██║░╚═╝░██║██║░░██║██║██║░╚███║  ██║░╚═╝░██║███████╗██║░╚███║╚██████╔╝") print_slow( Fore.GREEN + "╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝╚═╝░░╚══╝  ╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Start] Start the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Options] Change the settings |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Exit] Exit the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) # Start the game if choice.lower() == "start": load_game() start_game() # Open game settings elif choice.lower() == "options": clear_terminal() game_settings() # Exit the game elif choice.lower() == "exit": print_slow(Fore.GREEN + "\nExiting..." + Style.RESET_ALL) pygame.mixer.music.stop() sys.exit() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(2) clear_terminal() # Function to get the user's balance def get_balance(): return balance # Function to add money to the user's balance def add_money(amount): global balance balance += amount # Function to subtract money from the user's balance def subtract_money(amount): global balance balance -= amount def add_level(level): global player_level player_level += level # Function to print the user's balance def print_balance(): print_slow(f"Your current balance is: £{get_balance()}") # Function to read files and marks files as evidence def read_file(file_content, file_name): global has_read_file, evidence global balance # Print the file content print_slow(Fore.LIGHTBLUE_EX + f"\n{file_name}:\n\n{file_content}" + Style.RESET_ALL) print_slow("") # Check if the file is one of the specific files that increases evidence count if file_name.lower() in ["employee_performance_review.txt"]: evidence_item = 4 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fourth_call() if file_name.lower() in ["meeting_minutes.txt"]: evidence_item = 5 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fifth_call() # Add more file names here as needed # Add money to balance based on the file name if file_name.lower() == "employee_performance_review.txt": balance += 30 elif file_name.lower() == "meeting_minutes.txt": balance += 50 # List of available upgrades upgrades = [ {"name": "EnigmaLink", "description": "Application required to connect to Enigma Corps network.", "price": 100}, {"name": "CodeShatter", "description": "A powerful password breaker that can crack even the strongest passwords.", "price": 250}, {"name": "EyeSpy", "description": "A privacy breaker to gain access to the smallest of cameras.", "price": 500}, {"name": "Rift", "description": "Break the barrier between the Server and Network.", "price": 800} ] # Function to display the shop def shop(): clear_terminal() print_slow(Fore.YELLOW + r''' ██╗░░██╗░█████╗░░█████╗░██╗░░██╗███████╗██████╗░  ███╗░░░███╗░█████╗░██████╗░██╗░░██╗███████╗████████╗ ██║░░██║██╔══██╗██╔══██╗██║░██╔╝██╔════╝██╔══██╗  ████╗░████║██╔══██╗██╔══██╗██║░██╔╝██╔════╝╚══██╔══╝ ███████║███████║██║░░╚═╝█████═╝░█████╗░░██████╔╝  ██╔████╔██║███████║██████╔╝█████═╝░█████╗░░░░░██║░░░ ██╔══██║██╔══██║██║░░██╗██╔═██╗░██╔══╝░░██╔══██╗  ██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗░██╔══╝░░░░░██║░░░ ██║░░██║██║░░██║╚█████╔╝██║░╚██╗███████╗██║░░██║  ██║░╚═╝░██║██║░░██║██║░░██║██║░╚██╗███████╗░░░██║░░░ ╚═╝░░╚═╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝  ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚══════╝░░░╚═╝░░░''' + Style.RESET_ALL) print_slow(Fore.YELLOW + "\nWelcome to the Hacker's Market!" 
+ Style.RESET_ALL) print_slow("") print_slow(Fore.YELLOW + "Here you can buy upgrades to improve your hacking abilities.\n" + Style.RESET_ALL) while True: # Display the list of available upgrades for i, upgrade in enumerate(upgrades): print_slow( Fore.YELLOW + f"\n{upgrade['name']} - {upgrade['description']} - £{upgrade['price']}" + Style.RESET_ALL) # Get the user's choice command = input(Fore.YELLOW + "\n> " + Style.RESET_ALL) # Buy the chosen upgrade if command.lower() == 'exit': print_slow(Fore.YELLOW + "\nExiting Hacker's Market" + Style.RESET_ALL) time.sleep(1) clear_terminal() start_game() elif command.lower() == 'help': shop_help() elif command.lower().startswith('buy '): upgrade_name = command[4:] # [4:] removes first 4 characters if has_item('EnigmaLink'): if upgrade_name.lower() == 'enigmalink': print_slow("") print_slow(Fore.RED + "Sold Out" + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() # Check if the purchased upgrade is CodeShatter if upgrade_name.lower() == 'codeshatter': print_slow("") print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) code_shatter_call() shop() else: clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() # Function to start the game def start_game(): global has_intro_call, has_started_game, seen_markus if has_intro_call: clear_terminal() pass else: print_slow("\nStarting game...") time.sleep(1) print_slow("\nLoading assets...") time.sleep(1) clear_terminal() print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) intro_call() has_intro_call = True has_started_game = True print_slow(Fore.MAGENTA + "\nHint: Type 'help' to get a list of available commands." + Style.RESET_ALL) pass if seen_markus: print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) markus_seen_call() else: pass # Game command loop command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Connect to the network if command.lower() == "connect": connect() # Access the mail system elif command.lower() == "mail": mail() # Display help message elif command.lower() == "help": help_user() # Check balance elif command.lower() == "balance": print_balance() # Enter shop elif command.lower() == "shop": shop() # Clear terminal elif command.lower() == "clear": clear_terminal() # Return to the main menu elif command.lower() == "exit": print_slow("Returning to Main Menu...") time.sleep(1) main() else: print_slow("Invalid command, please try again.") time.sleep(1) clear_terminal() start_game() # Save the game state save_game() # Function to check if an item is in the inventory def has_item(item): return item in inventory def scan(): print_slow("") print_slow(Fore.YELLOW + "Scanning network..." + Style.RESET_ALL) time.sleep(2) print_slow("") print_slow(Fore.YELLOW + "\nAvailable Systems:" + Style.RESET_ALL) print_slow("") for system in all_systems: if system['level'] == player_level: print_slow("") print_slow(f"{system['name']} ({system['type']})") print_slow("") def getpass_star(prompt="Password: "): print(prompt, end='', flush=True) password = [] while True: char = msvcrt.getch().decode('utf-8') if char == '\r' or char == '\n': break elif char == '\b': # Backspace if password: password.pop() print('\b \b', end='', flush=True) else: password.append(char) print('*', end='', flush=True) print() # Move to the next line return ''.join(password) def hack(system_name): global seen_markus # Find the system in the all_systems list system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None) if system: if system['level'] == player_level: # Check for CodeShatter before prompting for password if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." 
+ Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) first_call() if email['subject'].lower() == "upcoming software update" and email['sender'].lower() == 'markus': evidence_item = 6 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL)
sixth_call()
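Side note on the read_email code above: the four evidence branches differ only in subject, optional sender, evidence id, and follow-up call, so they collapse naturally into a lookup table. A minimal sketch, reusing the record's own helpers (print_slow, has_evidence, add_evidence, and the *_call functions); the EVIDENCE_TRIGGERS table itself is hypothetical, not part of the record:

import time
from colorama import Fore, Style

# (subject, required sender or None) -> (evidence id, follow-up call)
EVIDENCE_TRIGGERS = {
    ("project update", None): (3, third_call),
    ("professional development", None): (2, second_call),
    ("can't stop thinking about you", "amy"): (1, first_call),
    ("upcoming software update", "markus"): (6, sixth_call),
}

def check_evidence(email):
    # Replays the repeated branch bodies once, driven by the table above.
    for (subject, sender), (item, call_fn) in EVIDENCE_TRIGGERS.items():
        if email['subject'].lower() != subject:
            continue
        if sender is not None and email['sender'].lower() != sender:
            continue
        if not has_evidence(item):
            print_slow("Adding evidence to the list...")
            print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL)
            add_evidence(item)
            time.sleep(3)
            print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL)
            input(Fore.GREEN + "> " + Style.RESET_ALL)
            call_fn()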
13
2023-11-06 09:52:13+00:00
24k
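The getpass_star helper in this record reads keystrokes through msvcrt, so it only runs on Windows. A cross-platform sketch of the same masked prompt (the name getpass_star_portable and the termios fallback are my additions, not part of the record):

import sys

def getpass_star_portable(prompt="Password: "):
    # Echo one '*' per keystroke; Enter finishes, Backspace erases.
    print(prompt, end='', flush=True)
    if sys.platform == 'win32':
        import msvcrt
        def read_char():
            return msvcrt.getch().decode('utf-8', errors='ignore')
    else:
        import termios, tty
        def read_char():
            fd = sys.stdin.fileno()
            old = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                return sys.stdin.read(1)
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old)
    password = []
    while True:
        char = read_char()
        if char in ('\r', '\n'):
            break
        if char in ('\b', '\x7f'):  # Backspace on Windows / POSIX
            if password:
                password.pop()
                print('\b \b', end='', flush=True)
        else:
            password.append(char)
            print('*', end='', flush=True)
    print()  # Move to the next line
    return ''.join(password)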
ziqi-zhang/TAOISM
python/test/test_conv.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_...
import os import sys import numpy as np import torch import torch.distributed as dist import sys import pdb from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.flatten import SecretFlattenLayer from python.layers.input import SecretInputLayer from python.layers.maxpool2d import SecretMaxpool2dLayer from python.layers.output import SecretOutputLayer from python.layers.relu import SecretReLULayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.layers.sgx_linear_base import SGXLinearBase from python.layers.sgx_conv_base import SGXConvBase from python.utils.basic_utils import ExecutionModeOptions from python.utils.logger_utils import Logger from python.quantize_net import NetQ from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer from python.utils.torch_utils import compare_expected_actual from pdb import set_trace as st
21,551
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name) compare_name_function = [("input", get_layer_input), ("output", get_layer_output), ("DerOutput", get_layer_output_grad), ] if layer_name != "conv1": compare_name_function.append(("DerInput", get_layer_input_grad)) for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def compare_weight_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: compare_layer(layer, layer_name, save_path) compare_name_function = [("weight", get_layer_weight), ("DerWeight", get_layer_weight_grad) ] for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) class ForkedPdb(pdb.Pdb): """A Pdb subclass that may be used from a forked multiprocessing child """ def interaction(self, *args, **kwargs): _stdin = sys.stdin try: sys.stdin = open('/dev/stdin') pdb.Pdb.interaction(self, *args, **kwargs) finally: sys.stdin = _stdin def test_conv( batch_size, img_hw, input_c, output_c, kernel, padding, stride, bias=False, set_values_to_one=False, sid=0 ): print("="*20, "TestConv", "="*20) print( f"batch {batch_size}, img_hw {img_hw}, input_c {input_c}, output_c {output_c}, " + f"kernel {kernel}, padding {padding}, stride {stride}" ) # def test_conv( # bias=False, set_values_to_one=True, # sid=0 # ): # batch_size = 128 # input_c = 3 # output_c = 64 # img_hw = 224 # kernel, padding, stride = 7, 3, 2 # batch_size = 128 # input_c = 512 # output_c = 512 # img_hw = 7 # kernel, padding, stride = 3, 1, 1 x_shape = [batch_size, input_c, img_hw, img_hw] GlobalTensor.init()
input_layer = SecretInputLayer(sid, "InputLayer", x_shape, ExecutionModeOptions.Enclave )
21
2023-11-01 10:37:37+00:00
24k
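The test above leans on compare_expected_actual(..., get_relative=True) from python.utils.torch_utils, whose body this dump does not show. A plausible stand-in, assuming "relative" means the maximum absolute error scaled by the expected tensor's magnitude (an assumption, not the repo's definition):

import torch

def compare_expected_actual(expected, actual, get_relative=False, verbose=False):
    # Max absolute difference, optionally normalized by |expected|.
    expected = expected.detach().cpu().float()
    actual = actual.detach().cpu().float()
    err = (expected - actual).abs().max().item()
    if get_relative:
        err = err / (expected.abs().max().item() or 1.0)
    if verbose:
        print(f"max {'relative ' if get_relative else ''}error = {err:.3e}")
    return err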
Codra-Ingenierie-Informatique/DataLab
cdl/tests/features/embedded1_unit.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\"...
import abc import cdl.obj from guidata.qthelpers import ( get_std_icon, qt_app_context, win32_fix_title_bar_background, ) from guidata.widgets.codeeditor import CodeEditor from qtpy import QtWidgets as QW from cdl.config import _ from cdl.core.gui.main import CDLMainWindow from cdl.tests import data as test_data
16,132
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Application embedded test 1 DataLab main window is destroyed when closing application. It is rebuilt from scratch when reopening application. """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... # guitest: show class HostWidget(QW.QWidget): """Host widget: menu with action buttons, log viewer""" def __init__(self, parent=None): super().__init__(parent) self.button_layout = QW.QVBoxLayout() self.logwidget = CodeEditor(self) self.logwidget.setMinimumWidth(500) grid_layout = QW.QGridLayout() grid_layout.addLayout(self.button_layout, 0, 0) grid_layout.addWidget(self.logwidget, 0, 1) self.setLayout(grid_layout) def log(self, message): """Log message""" self.logwidget.appendPlainText(message) def add_spacing(self, spacing: int) -> None: """Add spacing to button box""" self.button_layout.addSpacing(spacing) def add_label(self, text: str) -> None: """Add label to button box""" self.button_layout.addWidget(QW.QLabel(text)) def add_widget(self, obj: QW.QWidget, spacing_before: int = 0) -> None: """Add widget (QWidget) to button box""" if spacing_before > 0: self.add_spacing(spacing_before) self.button_layout.addWidget(obj) def add_button(self, title, slot, spacing_before=0, icon=None): """Add button""" btn = QW.QPushButton(title) if icon is not None: btn.setIcon(get_std_icon(icon)) btn.clicked.connect(lambda _checked=False: slot()) self.add_widget(btn, spacing_before=spacing_before) return btn def add_stretch(self): """Add stretch to button box""" self.button_layout.addStretch() class AbstractClientWindowMeta(type(QW.QMainWindow), abc.ABCMeta): """Mixed metaclass to avoid conflicts""" class AbstractClientWindow(QW.QMainWindow, metaclass=AbstractClientWindowMeta): """Abstract client window, to embed DataLab or connect to it""" PURPOSE = None INIT_BUTTON_LABEL = None SIG_TITLES = ("Oscilloscope", "Digitizer", "Radiometer", "Voltmeter", "Sensor") IMA_TITLES = ( "Camera", "Streak Camera", "Image Scanner", "Laser Beam Profiler", "Gated Imaging Camera", ) def __init__(self): super().__init__() win32_fix_title_bar_background(self) self.setWindowTitle(_("Host application")) self.setWindowIcon(get_std_icon("ComputerIcon"))
self.cdl: CDLMainWindow = None
1
2023-11-09 16:56:03+00:00
24k
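HostWidget.add_button wires clicked through lambda _checked=False: slot(). Qt's clicked signal emits a checked bool, and the defaulted lambda parameter absorbs it so zero-argument callables work as slots. A self-contained sketch of the same pattern (the demo window is mine):

from qtpy import QtWidgets as QW

def add_button(layout, title, slot):
    # clicked passes a 'checked' bool; the defaulted parameter swallows it.
    btn = QW.QPushButton(title)
    btn.clicked.connect(lambda _checked=False: slot())
    layout.addWidget(btn)
    return btn

if __name__ == "__main__":
    app = QW.QApplication([])
    w = QW.QWidget()
    lay = QW.QVBoxLayout(w)
    add_button(lay, "Ping", lambda: print("clicked"))
    w.show()
    app.exec_()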
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('...
import os import time import logging import argparse import numpy as np import cv2 as cv import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from shutil import copyfile from tqdm import tqdm from pyhocon import ConfigFactory from models.dataset import Dataset, BlenderDataset from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork from models.tensor4d import Tensor4D from models.renderer import NeuSRenderer from models.mask import Mask3D from metrics import *
16,173
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks
self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device)
5
2023-11-07 10:16:33+00:00
24k
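Runner.__init__ performs a small configuration dance: read the HOCON text, substitute the CASE_NAME placeholder, parse, then patch the data directory. Isolated as a helper for clarity (the name load_conf is mine; the steps mirror the record):

from pyhocon import ConfigFactory

def load_conf(conf_path, case):
    # Substitute the placeholder before parsing, then fix the one field
    # that still carries it after parsing, exactly as Runner.__init__ does.
    with open(conf_path) as f:
        conf_text = f.read().replace('CASE_NAME', case)
    conf = ConfigFactory.parse_string(conf_text)
    conf['dataset.data_dir'] = conf['dataset.data_dir'].replace('CASE_NAME', case)
    return conf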
Kushalhk/AutoFilter
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, ...
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
19,629
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK),
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK),
InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK)
6
2023-11-03 12:21:26+00:00
24k
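save_group keys off whether the bot's own id (cached in temp.ME) appears in message.new_chat_members. A stripped-down sketch of the same detection that fetches the id with get_me instead of the cache (handler name and reply text are mine):

from pyrogram import Client, filters

@Client.on_message(filters.new_chat_members & filters.group)
async def on_added_to_group(bot, message):
    # new_chat_members lists the users that just joined; if the bot's own
    # id is among them, the bot itself was added to this chat.
    joined_ids = [u.id for u in message.new_chat_members]
    me = await bot.get_me()
    if me.id in joined_ids:
        await message.reply_text("Thanks for adding me!")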
apple/ml-reed
reed/algorithms/pebble.py
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *arg...
import typing as t import time import numpy as np import torch import hydra from pathlib import Path from omegaconf import dictconfig, OmegaConf from BPref import utils from BPref.logger import Logger from BPref.replay_buffer import TrajectoryReplayBuffer from collections import deque from reed.models.reward_model import StateActionRewardModel from reed.data.preference_dataset import PreferenceDataset from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader from reed.data.preprocess_images import PreProcessInference
18,851
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionary """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes
utils.set_seed_everywhere(self.experiment_config.seed)
0
2023-11-06 23:14:20+00:00
24k
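The constructor's last step seeds every RNG via utils.set_seed_everywhere from BPref, which is only stubbed in this record's context field. The conventional implementation, offered as a sketch of what it presumably does:

import random
import numpy as np
import torch

def set_seed_everywhere(seed):
    # Seed every RNG a PyTorch training loop may touch.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)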
alibaba/animate-anything
train.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: st...
import argparse import datetime import logging import inspect import math import os import json import gc import copy import random import cv2 import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import diffusers import transformers import numpy as np import imageio import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from tqdm.auto import tqdm from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers.models import AutoencoderKL from diffusers import DPMSolverMultistepScheduler, DDPMScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, export_to_video from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset from einops import rearrange, repeat from models.unet_3d_condition_mask import UNet3DConditionModel from models.pipeline import LatentToVideoPipeline from utils.lora_handler import LoraHandler, LORA_VERSIONS from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \ read_video, calculate_motion_precision, calculate_latent_motion_score, \ DDPM_forward, DDPM_forward_timesteps, DDPM_forward_mask, motion_mask_loss, \ generate_center_mask, tensor_to_vae_latent from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
19,759
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset] dataset_map = {d.__getname__(): d for d in dataset_cls} # Loop through all available datasets, get the name, then add to list of data to process. for dataset in dataset_types: if dataset in dataset_map: train_datasets.append(dataset_map[dataset](**train_data, tokenizer=tokenizer)) else: raise ValueError(f"Dataset type not found: {dataset} not in {dataset_map.keys()}") return train_datasets def extend_datasets(datasets, dataset_items, extend=False): biggest_data_len = max(x.__len__() for x in datasets) extended = [] for dataset in datasets: if dataset.__len__() == 0: del dataset continue if dataset.__len__() < biggest_data_len: for item in dataset_items: if extend and item not in extended and hasattr(dataset, item): print(f"Extending {item}") value = getattr(dataset, item) value *= biggest_data_len value = value[:biggest_data_len] setattr(dataset, item, value) print(f"New {item} dataset length: {dataset.__len__()}") extended.append(item) def export_to_video(video_frames, output_video_path, fps): fourcc = cv2.VideoWriter_fourcc(*"mp4v") h, w, _ = video_frames[0].shape video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h)) for i in range(len(video_frames)): img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) video_writer.write(img) def create_output_folders(output_dir, config): now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(output_dir, f"train_{now}") os.makedirs(out_dir, exist_ok=True) os.makedirs(f"{out_dir}/samples", exist_ok=True) OmegaConf.save(config, os.path.join(out_dir, 'config.yaml')) return out_dir def load_primary_models(pretrained_model_path, motion_mask, motion_strength): noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae") unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder="unet", low_cpu_mem_usage=False, device_map=None, motion_mask=motion_mask, motion_strength=motion_strength) if pretrained_model_path.endswith('zeroscope_v2_576w'): #first time init, modify unet conv in2 unet.conv_in2.bias.data = copy.deepcopy(unet.conv_in.bias) torch.nn.init.zeros_(unet.conv_in2.weight) unet.conv_in2.weight.data[:,1:]= copy.deepcopy(unet.conv_in.weight) return noise_scheduler, tokenizer, text_encoder, vae, unet def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable): unet._set_gradient_checkpointing(value=unet_enable) if text_enable: text_encoder.gradient_checkpointing_enable() else: 
text_encoder.gradient_checkpointing_disable() def freeze_models(models_to_freeze): for model in models_to_freeze: if model is not None: model.requires_grad_(False) def is_attn(name): return name.split('.')[-1] in ('attn1', 'attn2') def set_processors(attentions): for attn in attentions: attn.set_processor(AttnProcessor2_0()) def set_torch_2_attn(unet): optim_count = 0 for name, module in unet.named_modules(): if is_attn(name): if isinstance(module, torch.nn.ModuleList): for m in module: if isinstance(m, BasicTransformerBlock): set_processors([m.attn1, m.attn2]) optim_count += 1 if optim_count > 0: print(f"{optim_count} Attention layers using Scaled Dot Product Attention.") def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet): try: is_torch_2 = hasattr(F, 'scaled_dot_product_attention') enable_torch_2 = is_torch_2 and enable_torch_2_attn if enable_xformers_memory_efficient_attention and not enable_torch_2: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) else: raise ValueError("xformers is not available. Make sure it is installed correctly") if enable_torch_2: set_torch_2_attn(unet) except: print("Could not enable memory efficient attention for xformers or Torch 2.0.") def param_optim(model, condition, extra_params=None, is_lora=False, negation=None): extra_params = extra_params if len(extra_params.keys()) > 0 else None return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam.
You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, device, weight_dtype): for model in model_list: if model is not None: model.to(device, dtype=weight_dtype) def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, cached_latent_dir=None, shuffle=False ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader(
CachedDataset(cache_dir=cache_save_dir),
4
2023-12-07 08:26:29+00:00
24k
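handle_cache_latents in this record trades disk for compute: every batch is VAE-encoded once up front so the training loop iterates over precomputed latents. The core loop, reduced to a sketch (assuming 4-D image batches; the record routes 5-D video through its tensor_to_vae_latent helper, whose body is not shown here):

import os
import torch

def cache_latents(train_dataloader, vae, cache_dir):
    # Encode each batch once with the frozen fp16 VAE and persist it;
    # training then loads latents instead of re-encoding pixels.
    os.makedirs(cache_dir, exist_ok=True)
    vae.to('cuda', dtype=torch.float16)
    for i, batch in enumerate(train_dataloader):
        pixels = batch['pixel_values'].to('cuda', dtype=torch.float16)
        with torch.no_grad():
            batch['pixel_values'] = vae.encode(pixels).latent_dist.sample()
        torch.save({k: v[0] for k, v in batch.items()},
                   os.path.join(cache_dir, f'cached_{i}.pt'))
        torch.cuda.empty_cache()  # avoid fragmentation, as the record notes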
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "annotator/oneformer/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\r\n \"\"\"\r\n Decorate a function or a class's __init__ method so that it can be called\r\n with a :class:`CfgNode` object using a :func...
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from annotator.oneformer.detectron2.config import configurable from annotator.oneformer.detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from annotator.oneformer.detectron2.utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
15,594
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. """ super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg):
backbone = build_backbone(cfg)
12
2023-12-05 02:51:53+00:00
24k
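RetinaNet's classification loss is the sigmoid focal loss this module imports as fvcore's sigmoid_focal_loss_jit. Written out as a reference sketch of the standard formulation (Lin et al.): easy examples are down-weighted by (1 - p_t)^gamma and classes balanced by alpha:

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # targets is a 0/1 tensor of the same shape as logits.
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = p * targets + (1 - p) * (1 - targets)
    loss = ce * (1 - p_t) ** gamma
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss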
DiffusionLight/DiffusionLight
relighting/inpainter.py
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] =...
import torch import numpy as np import os import pickle from diffusers import ControlNetModel, AutoencoderKL from PIL import Image from tqdm.auto import tqdm from transformers import pipeline as transformers_pipeline from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type from relighting.image_processor import ( estimate_scene_depth, estimate_scene_normal, merge_normal_map, fill_depth_circular ) from relighting.ball_processor import get_ideal_normal_ball, crop_ball from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
18,097
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16)
pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained(
0
2023-12-07 14:03:31+00:00
24k
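process_sd_depth above boils down to: run a monocular depth estimator and tile the single-channel map into the 3-channel image ControlNet consumes. Extracted as a standalone sketch (fill_depth_circular, which patches the chrome-ball region, is deliberately omitted):

import numpy as np
from PIL import Image
from transformers import pipeline as transformers_pipeline

def depth_control_image(input_image, device_index=0):
    # The depth pipeline returns a single-channel PIL image; stack it to RGB.
    depth_estimator = transformers_pipeline('depth-estimation', device=device_index)
    depth = np.array(depth_estimator(input_image)['depth'])[:, :, None]
    control = np.concatenate([depth, depth, depth], axis=2)
    return Image.fromarray(control)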
modelscope/normal-depth-diffusion
ldm/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\...
import pdb import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn import torch.nn.functional as F from contextlib import contextmanager from functools import partial from einops import rearrange, repeat from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface) from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.modules.attention import CrossAttention from ldm.modules.diffusionmodules.util import (extract_into_tensor, make_beta_schedule, noise_like) from ldm.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl) from ldm.modules.ema import LitEma from ldm.util import (count_params, default, exists, filter_nan_loss, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat) from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from pytorch_lightning.utilities.distributed import rank_zero_only from pytorch_lightning.utilities.rank_zero import rank_zero_only
16,562
else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.1): ''' we add unconditional prompts to improve classifier-free guidance results ''' x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} # To support classifier-free guidance, randomly drop out only text conditioning 10% like sd-v1.5 random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < uncond, 'n -> n 1 1') null_prompts = self.get_learned_conditioning(['']).to(c.device) cc = torch.where(prompt_mask, null_prompts, c) out = [z, cc] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None: self.prior_model = instantiate_from_config(prior_model) else: self.prior_model = None if prior_normal is not None: self.prior_normal = instantiate_from_config(prior_normal) else: self.prior_normal = None def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[ 0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( 'posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer( 'posterior_mean_coef1', to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer( 'posterior_mean_coef2', to_torch((1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == 'eps': lvlb_weights = self.betas**2 / (2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == 'x0': lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / ( 2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError('mu not supported') # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f'{context}: Switched to EMA weights') try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f'{context}: Restored training weights') def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location='cpu') if 'state_dict' in list(sd.keys()): sd = sd['state_dict'] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print('Deleting key {} from state_dict.'.format(k)) del sd[k] missing, unexpected = self.load_state_dict( sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: print(f'Missing Keys: {missing}') if len(unexpected) > 0: print(f'Unexpected Keys: {unexpected}') if self.use_ema: if len(missing) > 0: model_ema_str = sorted(missing)[-1] # missing model_ema if 'model_ema' in model_ema_str: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) else: if self.ema_copy == True: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == 'eps': x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == 'x0': x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape( b, *((1, ) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample( img, torch.full((b, ), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss( target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == 'eps': target = noise elif 
            target = x_start
        else:
            raise NotImplementedError(
                f'Parameterization {self.parameterization} not yet supported')

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(
            0, self.num_timesteps, (x.shape[0], ), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    # property of model for (to, cuda, cpu, float, half, ...)
    def to(self, *args, **kwargs):  # type: ignore[valid-type]
        """See :meth:`torch.nn.Module.to`."""
        # this converts `str` device to `torch.device`
        if self.prior_model is not None:
            self.prior_model.to(*args, **kwargs)
        if self.prior_normal is not None:
            self.prior_normal.to(*args, **kwargs)
        return super().to(*args, **kwargs)

    def cuda(self, device=None):  # type: ignore[valid-type]
        """Moves all model parameters and buffers to the GPU.
        This also makes associated parameters and buffers different objects. So
        it should be called before constructing optimizer if the module will
        live on GPU while being optimized.

        Arguments:
            device: If specified, all parameters will be copied to that device. If
                `None`, the current CUDA device index will be used.
Returns: Module: self """ if device is None: device = torch.device('cuda', torch.cuda.current_device()) elif isinstance(device, int): device = torch.device('cuda', index=device) if self.prior_model is not None: self.prior_model.cuda(device) if self.prior_normal is not None: self.prior_normal.cuda(device) return super().cuda(device=device) def cpu(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.cpu`.""" if self.prior_model is not None: self.prior_model.cpu() if self.prior_normal is not None: self.prior_normal.cpu() return super().cpu() def float(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.float`.""" if self.prior_model is not None: self.prior_model.float() if self.prior_normal is not None: self.prior_normal.float() return super().float() def double(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.double`.""" if self.prior_model is not None: self.prior_model.double() if self.prior_normal is not None: self.prior_normal.double() return super().double() def half(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.half`.""" if self.prior_model is not None: self.prior_model.half() if self.prior_normal is not None: self.prior_normal.half() return super().half() def prior_to_eval(self): if self.prior_model is not None: self.prior_model.eval() if self.prior_normal is not None: self.prior_normal.eval() @torch.no_grad() def prior_inference(self, inputs, prior_inputs): # depth prior model # midas or zoe is 384 model inputs = inputs.permute(0, 3, 1, 2) prior_results = {} self.prior_to_eval() # using depth prior if self.prior_model is not None: model_prior_results = self.prior_model(prior_inputs) prior_results.update(model_prior_results) # using normal map if self.prior_normal is not None: normal_prior_results = self.prior_normal(prior_inputs) prior_results.update(normal_prior_results) resize_prior_results = {} _, __, h, w = inputs.shape for key in prior_results.keys(): resize_prior_results[key] = F.interpolate( prior_results[key], (w, h), mode='bilinear') # add a rgb input resize_prior_results.update({'rgb': inputs}) input_container = [] for key in self.input_keys: input_container.append(resize_prior_results[key]) return torch.cat(input_container, dim=1).permute(0, 2, 3, 1) @torch.no_grad() def collect_inputs(self, batch): input_container = [] for key in self.input_keys: # [B H W C] input_container.append(batch[key]) return torch.cat(input_container, dim=-1) def training_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log( 'global_step', self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log( 'lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, 
loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) @torch.no_grad() def test_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): # args: outputs, batch, batch_idx if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log['inputs'] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log['diffusion_row'] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope('Plotting'): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True) log['samples'] = samples log['denoise_row'] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key='image', cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, first_stage_ckpts=None, without_crossattn=False, ema_copy=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop('ckpt_path', None) ignore_keys = kwargs.pop('ignore_keys', []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = 
len( first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.first_stage_ckpts = first_stage_ckpts # VAE Load self.instantiate_first_stage(first_stage_config) # CLIP load self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False self.ema_copy = ema_copy if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if self.first_stage_ckpts is not None: first_stage_ckpts = torch.load( self.first_stage_ckpts, map_location='cpu') no_match = self.first_stage_model.load_state_dict( first_stage_ckpts['state_dict'], strict=False) print('encode-decode, no match keys:\n {}'.format(no_match)) for param in self.first_stage_model.parameters(): param.requires_grad = False # lambda-stage-1 without crossattn if without_crossattn: for m in self.modules(): if isinstance(m, CrossAttention): for para in m.parameters(): para.requires_grad = False # RuntimeError: One of the differentiated Tensors does not require grad def make_cond_schedule(self, ): self.cond_ids = torch.full( size=(self.num_timesteps, ), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print('### USING STD-RESCALING ###') x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f'setting self.scale_factor to {self.scale_factor}') print('### USING STD-RESCALING ###') def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == '__is_first_stage__': print('Using first stage also as cond stage.') self.cond_stage_model = self.first_stage_model elif config == '__is_unconditional__': print( f'Training {self.__class__.__name__} as an unconditional model.' 
) self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): ''' # CLIP embedding ''' if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable( self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params['clip_min_weight'], self.split_input_params['clip_max_weight'], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params['tie_braker']: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params['clip_min_tie_weight'], self.split_input_params['clip_max_tie_weight']) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, 
dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.1): ''' we add uncondition prompts to improve classifer-free guidance results ''' x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior 
= self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} # To support classifier-free guidance, randomly drop out only text conditioning 10% like sd-v1.5 random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < uncond, 'n -> n 1 1') null_prompts = self.get_learned_conditioning(['']).to(c.device) cc = torch.where(prompt_mask, null_prompts, c) out = [z, cc] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
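register_schedule in the file above precomputes per-timestep buffers precisely so that q_sample can draw x_t from x_0 in one shot via x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A minimal sketch of that identity with a simple linear beta ramp between the constructor's default endpoints (the repo's make_beta_schedule differs in detail):

import numpy as np
import torch

betas = np.linspace(1e-4, 2e-2, 1000)       # linear_start/linear_end defaults from DDPM.__init__
alphas_cumprod = np.cumprod(1.0 - betas)    # alpha_bar_t

def q_sample(x0: torch.Tensor, t: int) -> torch.Tensor:
    # sample x_t directly, without iterating over steps 0..t
    eps = torch.randn_like(x0)
    a_bar = float(alphas_cumprod[t])
    return (a_bar ** 0.5) * x0 + ((1.0 - a_bar) ** 0.5) * eps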
if isinstance(self.first_stage_model, VQModelInterface):
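The gold next_line above branches on VQModelInterface inside the split_input_params decode path: each unfolded crop is decoded separately, weighted, and folded back, with normalization undoing the double counting in overlapping regions. A rough sketch of the loop that follows this line; decode_fn stands in for the first-stage decoder, and fold/normalization/weighting are the outputs of get_fold_unfold:

# z: (bn, nc, ks0, ks1, L) after unfold + reshape; decode each crop, then reassemble
outs = [decode_fn(z[:, :, :, :, i]) for i in range(z.shape[-1])]
o = torch.stack(outs, dim=-1) * weighting        # taper each crop toward its borders
o = o.view((o.shape[0], -1, o.shape[-1]))        # flatten to what nn.Fold expects
decoded = fold(o) / normalization                # average the overlapping regions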
2
2023-12-06 07:29:34+00:00
24k
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer from layers.moe_layer import MoE from interfaces import Result from layers import LayerVisualizer from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
19,506
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}:
mklayer = lambda: PrelnRelativeTransformerEncoderLayer(
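The gold next line opens a zero-argument factory: get_layers presumably calls mklayer() once per layer so that every depth gets its own parameters, while one closure captures all the constructor arguments. A minimal illustration of the pattern with a stock PyTorch layer standing in for PrelnRelativeTransformerEncoderLayer:

import torch.nn as nn

def build_layers(n_layers: int, d_model: int, n_heads: int) -> nn.ModuleList:
    # each mklayer() call constructs an independent set of weights
    mklayer = lambda: nn.TransformerEncoderLayer(d_model=d_model, nhead=n_heads)
    return nn.ModuleList(mklayer() for _ in range(n_layers))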
4
2023-12-13 08:45:02+00:00
24k
AIFSH/NativeDancer
nativedancer/third_part/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`...
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from ...config import configurable from ...layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from ...structures import Boxes, ImageList, Instances, pairwise_iou from ...utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
17,569
match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def forward_inference( self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] ): pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) results: List[Instances] = [] for img_idx, image_size in enumerate(images.image_sizes): scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, scores_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ pred = self._decode_multi_level_predictions( anchors, box_cls, box_delta, self.test_score_thresh, self.test_topk_candidates, image_size, ) keep = batched_nms( # per-class NMS pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh ) return pred[keep[: self.max_detections_per_image]] class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. prior_prob (float): Prior weight for computing bias """ super().__init__() self._num_features = len(input_shape) if norm == "BN" or norm == "SyncBN": logger.info( f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." ) bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm def norm(c): return CycleBatchNormList( length=self._num_features, bn_class=bn_class, num_features=c ) else:
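label_anchors above encodes three anchor states in one integer vector: a real class in [0, K), K for background (unmatched), and -1 for ignore. A toy sketch of that labeling step given a matcher's outputs; matched_idxs and anchor_labels are hand-written stand-ins for what Matcher returns:

import torch

num_classes = 80
# matcher outputs for 5 anchors: index of best gt box, and a {-1, 0, 1} quality label
matched_idxs  = torch.tensor([2, 0, 1, 0, 2])
anchor_labels = torch.tensor([1, 0, 1, -1, 0])   # 1=fg, 0=bg, -1=ignore
gt_classes    = torch.tensor([17, 3, 56])        # classes of the 3 gt boxes

gt_labels_i = gt_classes[matched_idxs]           # start from the matched gt class
gt_labels_i[anchor_labels == 0] = num_classes    # background anchors get label K
gt_labels_i[anchor_labels == -1] = -1            # ignored anchors are dropped from the loss
# gt_labels_i -> tensor([56, 80, 3, -1, 80])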
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. 
""" super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """ anchors = Boxes.cat(anchors) # Rx4 gt_labels = [] matched_gt_boxes = [] for gt_per_image in gt_instances: match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def forward_inference( self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] ): pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) results: List[Instances] = [] for img_idx, image_size in enumerate(images.image_sizes): scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, scores_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. 
Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ pred = self._decode_multi_level_predictions( anchors, box_cls, box_delta, self.test_score_thresh, self.test_topk_candidates, image_size, ) keep = batched_nms( # per-class NMS pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh ) return pred[keep[: self.max_detections_per_image]] class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. prior_prob (float): Prior weight for computing bias """ super().__init__() self._num_features = len(input_shape) if norm == "BN" or norm == "SyncBN": logger.info( f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." ) bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm def norm(c): return CycleBatchNormList( length=self._num_features, bn_class=bn_class, num_features=c ) else:
norm_name = str(type(get_norm(norm, 32)))
1
2023-12-10 20:14:00+00:00
24k
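The RetinaNet record above normalizes both `loss_cls` and `loss_box_reg` by an exponential moving average of the per-batch positive-anchor count (`self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100)`). Below is a minimal standalone sketch of that idea; the momentum value and the exact update rule are illustrative assumptions, not detectron2's verbatim `_ema_update` helper.

# Hedged sketch: an EMA-updated loss normalizer in the spirit of the call above.
# The 0.9 momentum and this blend rule are assumptions for illustration.
class EMANormalizer:
    def __init__(self, initial_value: float = 100.0, momentum: float = 0.9):
        self.value = initial_value
        self.momentum = momentum

    def update(self, batch_value: float) -> float:
        # Blend the running estimate toward the current batch's positive-anchor count.
        self.value = self.momentum * self.value + (1.0 - self.momentum) * batch_value
        return self.value

normalizer = EMANormalizer()
for num_pos_anchors in (120, 80, 0, 45):
    n = normalizer.update(max(num_pos_anchors, 1))
    # loss_cls and loss_box_reg would each be divided by n, as in losses() above.

Normalizing by a smoothed count rather than the raw per-batch count keeps the loss scale stable when the number of foreground anchors fluctuates between batches.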
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import json import os import sys import numpy as np import torch import torch.nn.functional as F import time from multiprocessing.pool import ThreadPool from pathlib import Path from tqdm import tqdm from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode from pycocotools.mask import encode from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
19,918
detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task],
23
2023-12-10 14:18:29+00:00
24k
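The `process_batch` function in the record above builds a per-detection correctness matrix over 10 IoU thresholds via greedy unique matching. Here is a hedged, self-contained sketch of just that matching loop; the `iou` matrix (M labels x N detections) and the boolean `correct_class` mask are assumed precomputed, as in the source.

import numpy as np
import torch

def match_predictions(iou: torch.Tensor, correct_class: torch.Tensor, iouv: torch.Tensor) -> torch.Tensor:
    # iou: (M, N) pairwise IoU; correct_class: (M, N) bool; iouv: (T,) thresholds.
    correct = np.zeros((iou.shape[1], iouv.numel()), dtype=bool)
    for i, thr in enumerate(iouv):
        label_idx, det_idx = torch.where((iou >= thr) & correct_class)
        if label_idx.numel():
            m = torch.stack((label_idx.float(), det_idx.float(), iou[label_idx, det_idx]), 1).cpu().numpy()
            m = m[m[:, 2].argsort()[::-1]]                    # best IoU first
            m = m[np.unique(m[:, 1], return_index=True)[1]]   # one match per detection
            m = m[np.unique(m[:, 0], return_index=True)[1]]   # one match per label
            correct[m[:, 1].astype(int), i] = True
    return torch.from_numpy(correct)

iou = torch.tensor([[0.72, 0.20], [0.10, 0.91]])
correct_class = torch.ones(2, 2, dtype=torch.bool)
print(match_predictions(iou, correct_class, torch.linspace(0.5, 0.95, 10)))

Sorting by IoU before the two `np.unique` passes keeps, at each threshold, only the highest-IoU one-to-one (label, detection) pairs, mirroring the source's logic.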
youngskkim/CRN
exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n...
from functools import partial from pytorch_lightning.core import LightningModule from torch.cuda.amp.autocast_mode import autocast from torch.optim.lr_scheduler import MultiStepLR from mmcv.runner import build_optimizer from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn from evaluators.det_evaluators import DetNuscEvaluator from models.base_bev_depth import BaseBEVDepth from utils.torch_dist import all_gather_object, synchronize import mmcv import torch import torch.nn.functional as F import torch.nn.parallel import torch.utils.data import torch.utils.data.distributed import torchvision.models as models
16,087
def forward(self, sweep_imgs, mats, is_train=False, **inputs): return self.model(sweep_imgs, mats, is_train=is_train) def training_step(self, batch): if self.global_rank == 0: for pg in self.trainer.optimizers[0].param_groups: self.log('learning_rate', pg["lr"]) (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds) self.log('train/detection', loss_detection) self.log('train/heatmap', loss_heatmap) self.log('train/bbox', loss_bbox) self.log('train/depth', loss_depth) return loss_detection + loss_depth def get_depth_loss(self, depth_labels, depth_preds, weight=3.): depth_labels = self.get_downsampled_gt_depth(depth_labels) depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view( -1, self.depth_channels) fg_mask = torch.max(depth_labels, dim=1).values > 0.0 with autocast(enabled=False): loss_depth = (F.binary_cross_entropy( depth_preds[fg_mask], depth_labels[fg_mask], reduction='none', ).sum() / max(1.0, fg_mask.sum())) return weight * loss_depth def get_downsampled_gt_depth(self, gt_depths): """ Input: gt_depths: [B, N, H, W] Output: gt_depths: [B*N*h*w, d] """ B, N, H, W = gt_depths.shape gt_depths = gt_depths.view( B * N, H // self.downsample_factor, self.downsample_factor, W // self.downsample_factor, self.downsample_factor, 1, ) gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous() gt_depths = gt_depths.view( -1, self.downsample_factor * self.downsample_factor) gt_depths_tmp = torch.where(gt_depths == 0.0, 1e5 * torch.ones_like(gt_depths), gt_depths) gt_depths = torch.min(gt_depths_tmp, dim=-1).values gt_depths = gt_depths.view(B * N, H // self.downsample_factor, W // self.downsample_factor) gt_depths = (gt_depths - (self.dbound[0] - self.dbound[2])) / self.dbound[2] gt_depths = torch.where( (gt_depths < self.depth_channels + 1) & (gt_depths > 0.), gt_depths, torch.zeros_like(gt_depths)) gt_depths = F.one_hot(gt_depths.long(), num_classes=self.depth_channels + 1).view( -1, self.depth_channels + 1)[:, 1:] return gt_depths.float() def eval_step(self, batch, batch_idx, prefix: str): (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False) if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): results = self.model.module.get_bboxes(preds, img_metas) else: results = self.model.get_bboxes(preds, img_metas) for i in range(len(results)): results[i][0] = results[i][0].tensor.detach().cpu().numpy() results[i][1] = results[i][1].detach().cpu().numpy() results[i][2] = results[i][2].detach().cpu().numpy() results[i].append(img_metas[i]) return results def validation_epoch_end(self, 
validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3])
# Copyright (c) Megvii Inc. All rights reserved. pretrain_config = dict( img_model_path=None, img_load_key=[], img_freeze_key=None, pts_model_path=None, pts_load_key=[]) optimizer_config = dict( type='AdamW', lr=2e-4, weight_decay=1e-2) H = 900 W = 1600 final_dim = (256, 704) img_conf = dict(img_mean=[123.675, 116.28, 103.53], img_std=[58.395, 57.12, 57.375], to_rgb=True) ida_aug_conf = { 'resize_lim': (0.386, 0.55), 'final_dim': final_dim, 'rot_lim': (-5.4, 5.4), 'H': 900, 'W': 1600, 'rand_flip': True, 'bot_pct_lim': (0.0, 0.0), 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], 'Ncams': 6, } bda_aug_conf = { 'rot_ratio': 1.0, 'rot_lim': (-22.5, 22.5), 'scale_lim': (0.95, 1.05), 'flip_dx_ratio': 0.5, 'flip_dy_ratio': 0.5 } rda_aug_conf = { 'N_sweeps': 6, 'N_use': 5, 'drop_ratio': 0.1, } backbone_img_conf = { 'x_bound': [-51.2, 51.2, 0.8], 'y_bound': [-51.2, 51.2, 0.8], 'z_bound': [-5, 3, 8], 'd_bound': [2.0, 58.0, 0.8], 'final_dim': final_dim, 'output_channels': 80, 'downsample_factor': 16, 'img_backbone_conf': dict( type='ResNet', depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir self.evaluator = DetNuscEvaluator(class_names=self.class_names, output_dir=self.default_root_dir) self.model = BaseBEVDepth(self.backbone_img_conf, self.head_conf) self.mode = 'valid' self.img_conf = img_conf self.data_use_cbgs = False self.load_interval = 1 self.num_sweeps = 1 self.sweep_idxes = list() self.key_idxes = list() self.data_return_depth = True self.downsample_factor = self.backbone_img_conf['downsample_factor'] self.dbound = self.backbone_img_conf['d_bound'] self.depth_channels = int( (self.dbound[1] - self.dbound[0]) / self.dbound[2]) self.use_fusion = False self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl' self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl' self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl' self.return_image = True self.return_depth = True self.return_radar_pv = False self.remove_z_axis = True def forward(self, sweep_imgs, mats, is_train=False, **inputs): return self.model(sweep_imgs, mats, is_train=is_train) def training_step(self, batch): if self.global_rank == 0: for pg in self.trainer.optimizers[0].param_groups: self.log('learning_rate', pg["lr"]) (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds) self.log('train/detection', loss_detection) self.log('train/heatmap', loss_heatmap) self.log('train/bbox', loss_bbox) self.log('train/depth', loss_depth) return loss_detection + loss_depth def get_depth_loss(self, depth_labels, depth_preds, weight=3.): depth_labels = self.get_downsampled_gt_depth(depth_labels) depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view( -1, self.depth_channels) fg_mask = 
torch.max(depth_labels, dim=1).values > 0.0 with autocast(enabled=False): loss_depth = (F.binary_cross_entropy( depth_preds[fg_mask], depth_labels[fg_mask], reduction='none', ).sum() / max(1.0, fg_mask.sum())) return weight * loss_depth def get_downsampled_gt_depth(self, gt_depths): """ Input: gt_depths: [B, N, H, W] Output: gt_depths: [B*N*h*w, d] """ B, N, H, W = gt_depths.shape gt_depths = gt_depths.view( B * N, H // self.downsample_factor, self.downsample_factor, W // self.downsample_factor, self.downsample_factor, 1, ) gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous() gt_depths = gt_depths.view( -1, self.downsample_factor * self.downsample_factor) gt_depths_tmp = torch.where(gt_depths == 0.0, 1e5 * torch.ones_like(gt_depths), gt_depths) gt_depths = torch.min(gt_depths_tmp, dim=-1).values gt_depths = gt_depths.view(B * N, H // self.downsample_factor, W // self.downsample_factor) gt_depths = (gt_depths - (self.dbound[0] - self.dbound[2])) / self.dbound[2] gt_depths = torch.where( (gt_depths < self.depth_channels + 1) & (gt_depths > 0.), gt_depths, torch.zeros_like(gt_depths)) gt_depths = F.one_hot(gt_depths.long(), num_classes=self.depth_channels + 1).view( -1, self.depth_channels + 1)[:, 1:] return gt_depths.float() def eval_step(self, batch, batch_idx, prefix: str): (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False) if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): results = self.model.module.get_bboxes(preds, img_metas) else: results = self.model.get_bboxes(preds, img_metas) for i in range(len(results)): results[i][0] = results[i][0].tensor.detach().cpu().numpy() results[i][1] = results[i][1].detach().cpu().numpy() results[i][2] = results[i][2].detach().cpu().numpy() results[i].append(img_metas[i]) return results def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3])
synchronize()
5
2023-12-06 14:57:49+00:00
24k
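The CRN record's `get_downsampled_gt_depth` turns a sparse metric depth map into one-hot depth-bin targets: take the closest valid return inside each downsample window, bin it with `d_bound = (min, max, step)`, and drop the invalid bin. Below is a simplified sketch under assumed shapes (a single view with H and W divisible by the factor; the source additionally flattens `B*N` camera views and the final output).

import torch
import torch.nn.functional as F

def downsample_and_bin_depth(gt: torch.Tensor, factor: int, dbound=(2.0, 58.0, 0.8)) -> torch.Tensor:
    # gt: (B, H, W) metric depth, 0 where there is no lidar/radar return.
    B, H, W = gt.shape
    g = gt.view(B, H // factor, factor, W // factor, factor)
    g = g.permute(0, 1, 3, 2, 4).reshape(B, H // factor, W // factor, factor * factor)
    g = torch.where(g == 0.0, torch.full_like(g, 1e5), g)  # mask empty pixels
    g = g.min(dim=-1).values                               # closest return per window
    channels = int(round((dbound[1] - dbound[0]) / dbound[2]))
    idx = (g - (dbound[0] - dbound[2])) / dbound[2]
    idx = torch.where((idx > 0.0) & (idx < channels + 1), idx, torch.zeros_like(idx))
    onehot = F.one_hot(idx.long(), num_classes=channels + 1)[..., 1:]  # drop the invalid bin
    return onehot.float()

depth = torch.zeros(1, 8, 8)
depth[0, 0, 0] = 10.0
targets = downsample_and_bin_depth(depth, factor=4)
# targets.shape == (1, 2, 2, 70); only the top-left cell has a nonzero (10 m) bin.

Taking the per-window minimum (after mapping zeros to 1e5) is what lets the depth loss supervise each downsampled feature location with its nearest visible surface.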
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/magic_animate/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optiona...
import inspect, math import numpy as np import torch import torch.distributed as dist import einops from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from animatediff.magic_animate.unet_controlnet import UNet3DConditionModel from animatediff.magic_animate.controlnet import ControlNetModel from animatediff.magic_animate.mutual_self_attention import ReferenceAttentionControl from animatediff.magic_animate.context import ( get_context_scheduler, get_total_steps ) from animatediff.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,748
decoder_consistency=None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt if prompt_embeddings is None: prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) else: text_embeddings = torch.cat([prompt_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=do_classifier_free_guidance, mode='write', batch_size=context_batch_size, clip_length=context_frames) reference_control_reader = ReferenceAttentionControl(unet, do_classifier_free_guidance=do_classifier_free_guidance, mode='read', batch_size=context_batch_size, clip_length=context_frames) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) if do_classifier_free_guidance: controlnet_uncond_images, controlnet_cond_images = control.chunk(2) else: controlnet_cond_images = control # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, clip_length=context_frames ) latents_dtype = latents.dtype # Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) if do_classifier_free_guidance: _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) else: controlnet_text_embeddings_c = controlnet_text_embeddings controlnet_res_samples_cache_dict = {i: None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).to(device) elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).to(device)
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank != 0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx + 1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx + 1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = ( batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length // clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet # condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = condition.to(device=device, dtype=dtype) # condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = einops.repeat(condition, 'b f c h w -> (b r) f c h w', r=num_videos_per_prompt) condition = rearrange(condition, 'b f c h w -> (b f) c h w').clone() # condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = 
self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_dir = (1 - alpha_prod_t_next) ** 0.5 * model_output x_next = alpha_prod_t_next ** 0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx + 1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0], latents.shape[1], ((latents.shape[2] - 1) * interpolation_factor) + 1, latents.shape[3], latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i / interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]): v0 = latents[:, :, i0, :, :] v1 = latents[:, :, i1, :, :] new_latents[:, :, new_index, :, :] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device), v1.to(device=device), f) new_latents[:, :, new_index, :, :] = v.to(latents.device) new_index += 1 new_latents[:, :, new_index, :, :] = v1 new_index += 1 return new_latents def 
select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], prompt_embeddings: Optional[torch.FloatTensor] = None, video_length: Optional[int] = 8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder=None, unet=None, source_image: str = None, decoder_consistency=None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt if prompt_embeddings is None: prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) else: text_embeddings = torch.cat([prompt_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=do_classifier_free_guidance, mode='write', batch_size=context_batch_size, clip_length=context_frames) reference_control_reader = ReferenceAttentionControl(unet, do_classifier_free_guidance=do_classifier_free_guidance, mode='read', batch_size=context_batch_size, clip_length=context_frames) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) if do_classifier_free_guidance: controlnet_uncond_images, controlnet_cond_images = control.chunk(2) else: controlnet_cond_images = control # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, clip_length=context_frames ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) if do_classifier_free_guidance: _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) else: controlnet_text_embeddings_c = controlnet_text_embeddings controlnet_res_samples_cache_dict = {i: None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).to(device) elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).to(device)
context_scheduler = get_context_scheduler(context_schedule)
3
2023-12-12 00:16:39+00:00
24k
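Note on the record above: its next_step() implements the deterministic DDIM inversion update, mapping a sample one step toward higher noise. As a minimal, self-contained sketch of that update, here is the same arithmetic in plain PyTorch; the alpha values, shapes, and function name below are illustrative assumptions, not the record's actual diffusers scheduler.

import torch

def ddim_inversion_step(x_t, eps_pred, alpha_t, alpha_next):
    """One deterministic DDIM inversion step: x_t -> x_{t+1}.

    x_t:        current latent sample
    eps_pred:   model's noise prediction at timestep t
    alpha_t:    cumulative alpha product at t
    alpha_next: cumulative alpha product at the next (noisier) step
    """
    # Recover the predicted clean sample x_0 from x_t and the noise estimate.
    pred_x0 = (x_t - (1 - alpha_t) ** 0.5 * eps_pred) / alpha_t ** 0.5
    # Re-noise x_0 toward the next timestep along the deterministic DDIM path.
    x_next = alpha_next ** 0.5 * pred_x0 + (1 - alpha_next) ** 0.5 * eps_pred
    return x_next, pred_x0

# Toy usage with made-up alphas: invert one step of a random latent.
x = torch.randn(1, 4, 8, 8)
eps = torch.randn_like(x)
x_next, x0 = ddim_inversion_step(x, eps, alpha_t=0.98, alpha_next=0.95)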
qitan/devops-backend-lite
common/ext_fun.py
[ { "identifier": "generate_docu", "path": "common/utils/ElasticSearchAPI.py", "snippet": "def generate_docu(table, index_version=None):\n index_name = f\"{table.name}-{index_version}\" if index_version else table.name\n _tbindex = Index(index_name)\n _tbindex.analyzer(my_normalizer)\n _tbinde...
from gitlab.exceptions import GitlabGetError from functools import reduce from common.utils.ElasticSearchAPI import generate_docu, Search from common.utils.GitLabAPI import GitLabAPI from common.utils.HarborAPI import HarborAPI from common.utils.JenkinsAPI import GlueJenkins from common.custom_format import convert_xml_to_str_with_pipeline from common.variables import DASHBOARD_TIME_FORMAT, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FREQNAMES, \ DASHBOARD_TIME_FREQNAMES_T, SENSITIVE_KEYS, JENKINS_CALLBACK_KEY, \ JENKINS_STATUS_MAP, DEV_LANGUAGE_KEY from dbapp.models import AppInfo, Product, KubernetesCluster, KubernetesDeploy, MicroApp, Project, ProjectConfig, DevLanguage, BuildJob, UserProfile, SystemConfig, Role, Permission, Menu, DataDict from django.conf import settings from django.core.cache import cache from django.utils import timezone from django.db.models import Q from social_django.utils import load_strategy from rest_framework.utils.serializer_helpers import ReturnDict from config import SOCIAL_AUTH_GITLAB_API_URL, GITLAB_ADMIN_TOKEN from common.utils.K8sAPI import K8sAPI from urllib.parse import urlparse, quote_plus from dateutil.relativedelta import relativedelta from dateutil.rrule import rrule from ruamel import yaml from datetime import datetime, timedelta from celery import current_app import copy import operator import re import time import pytz import os import json import requests import math import shortuuid import logging
17,777
for i in envs:
    try:
        env_value = i.get('value', None)
        cmname = i.pop('cmname', None)
        cmkey = i.pop('cmkey', None)
        if env_value:
            env_value = env_value.lstrip('"').rstrip('"').lstrip("'").rstrip("'")
        i.pop('value', None)
        i['name'] = i['name'].lstrip('"').rstrip('"').lstrip("'").rstrip("'")
        if i.get('valueFrom', None) == 'configMapKeyRef':
            i['valueFrom'] = {'configMapKeyRef': {'name': cmname, 'key': cmkey}}
        else:
            i['value'] = env_value
            i['valueFrom'] = None
    except BaseException as e:
        pass
yaml_template['spec']['template']['spec']['containers'][0]['env'] = envs

if template.get('health', False):
    _d = health_lifecycle_generate('health', True)
    for k, v in _d.items():
        yaml_template['spec']['template']['spec']['containers'][0][k] = v
if template.get('lifecycle', False):
    yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'] = {}
    _d = health_lifecycle_generate('lifecycle', False)
    for k, v in _d.items():
        yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'][k] = v

_vo_mount = [{'mountPath': '/data/logs', 'name': 'logs', 'readOnly': False}]
_volumes = [{'name': 'logs', 'type': 'Directory', 'hostPath': {
    'path': f'/data/{appinfo_obj.environment.name}-applogs/{appinfo_obj.app.project.name}/'}}]
if template.get('storage', None):
    for k, v in template['storage']['data'].items():
        for i in v:
            _x = {}
            for m, n in i.items():
                if isinstance(n, (str,)):
                    n = n.replace('${APPNAME}', appinfo_obj.app.name)
                if '_' in m:
                    _t = m.split('_')
                    if _x.get(_t[0], None):
                        _x[_t[0]][_t[1]] = n
                    else:
                        _x[_t[0]] = {_t[1]: n}
                else:
                    _x[m] = n
            _t = {'mountPath': _x['mount'], 'name': _x['name'],
                  'readOnly': True if _x.get('mode', None) == 'ReadOnly' else False}
            if _x.get('file', None):
                _t['subPath'] = _x['configMap']['items'][0]['key']
            _vo_mount.append(_t)
            _mode = _x.pop('mode', None)
            _x.pop('file', None)
            _x.pop('mount', None)
            if _x.get('configMap', None):
                _x['configMap']['defaultMode'] = 0o600 if _mode == 'ReadOnly' else 0o755
            _volumes.append(_x)
yaml_template['spec']['template']['spec']['containers'][0]['volumeMounts'] = _vo_mount
yaml_template['spec']['template']['spec']['volumes'] = _volumes
if use_host_network:
    yaml_template['spec']['template']['spec']['hostNetwork'] = True
partial_deploy_yaml_template = None

except BaseException as e:
    logger.exception(f'generate yaml err {e.__class__} {e}')
    return {'ecode': 500, 'message': str(e)}

# Handle multiple containers
if appinfo_obj.template.get('containers_custom', None):
    containers = container_generate(appinfo_obj.template.get('containers', []))
else:
    containers = container_generate(project_config.first().template.get('containers', []))
yaml_template['spec']['template']['spec']['containers'].extend(containers)
ret = {'ecode': 200, 'image': image, 'yaml': yaml_template}

if partial_deploy_yaml_template:
    ret['partial_deploy_yaml'] = partial_deploy_yaml_template
return ret


def get_members(obj):
    team_members = [j for i in obj.team_members.values() for j in i]
    return list(set(team_members))


def get_permission_from_role(request):
    try:
        perms = request.user.roles.values(
            'permissions__method',
        ).distinct()
        return [p['permissions__method'] for p in perms]
    except AttributeError:
        return []


def get_headers(request=None):
    """
    Function:       get_headers(self, request)
    Description:    To get all the headers from request
    """
    regex = re.compile('^HTTP_')
    return dict((regex.sub('', header), value) for (header, value)
                in request.META.items() if header.startswith('HTTP_'))


def mask_sensitive_data(data):
    """
    Hides sensitive keys specified in sensitive_keys settings.
    Loops recursively over nested dictionaries.
    """
    if hasattr(settings, 'DRF_API_LOGGER_EXCLUDE_KEYS'):
        if type(settings.DRF_API_LOGGER_EXCLUDE_KEYS) in (list, tuple):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Charles Lai
@Contact : qqing_lai@hotmail.com
@Time : 2020/12/21 10:00 AM
@FileName: ext_fun.py
@Blog :https://imaojia.com
"""

logger = logging.getLogger('drf')


class ThirdPartyUser(object):

    def get_user(self):
        user = UserProfile.objects.get_or_create(username='thirdparty')[0]
        self.set_permission(user, self.get_role())
        return user

    def get_role(self):
        return Role.objects.get_or_create(name='thirdparty')[0]

    def get_perm(self):
        return Permission.objects.get_or_create(name='Jenkins callback', method='jenkins_callback')[0]

    def set_permission(self, user, role):
        role.permissions.set([self.get_perm().id])
        user.roles.set([role.id])


def set_redis_data(name, config):
    cache.set(f"system:{name}", config, None)


def get_redis_data(name):
    ret = cache.get(f"system:{name}")
    if not ret:
        try:
            if name == 'cicd-harbor':
                qs = SystemConfig.objects.filter(type=name)[0]
            else:
                qs = SystemConfig.objects.get(name=name)
        except BaseException as e:
            return None
        ret = json.loads(qs.config)
        set_redis_data(name, ret)
    return ret


def get_datadict(name, config=0, default_value=None):
    """
    Fetch data from the data dictionary
    """
    try:
        qs = DataDict.objects.get(key=name)
    except BaseException as e:
        return default_value
    if config:
        ret = json.loads(qs.extra)
    else:
        ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc}
    return ret


def check_pods(cluster_id, k8s_config, namespace, **kwargs):
    k8s = KubernetesCluster.objects.get(id=cluster_id)
    # k8s_cli returns an (ok, client) pair
    ok, cli = k8s_cli(k8s, k8s_config)
    if not ok:
        return False
    count = 3
    while count:
        ret2 = cli.get_pods(namespace, **kwargs)
        count -= 1
        if len(ret2['items']) > 0:
            return True
        else:
            check_pods(cluster_id, k8s_config, namespace, **kwargs)
    return False


def template_svc_generate(appinfo_obj):
    """
    Generate Kubernetes Service YAML
    ### Format:
        {
            "apiVersion": "v1",
            "kind": "Service",
            "metadata": {
                "name": "appname",
                "namespace": "env-product",
                "labels": {
                    "app": "appname"
                }
            },
            "spec": {
                "ports": [{
                    "port": 8080,
                    "targetPort": 8080,
                    "protocol": "TCP",
                    "name": "http"
                }],
                "selector": {
                    "app": "appname"
                }
            }
        }
    """
    svc_temp = DataDict.objects.filter(key='yaml.svc')
    if svc_temp.exists():
        svc_temp = json.loads(svc_temp.first().extra)
        if appinfo_obj.environment.name in svc_temp:
            svc_temp = svc_temp[appinfo_obj.environment.name]
            namespace = appinfo_obj.namespace
            svc_temp['metadata']['name'] = appinfo_obj.app.name
            svc_temp['metadata']['namespace'] = namespace
            svc_temp['metadata']['labels'] = {'app': appinfo_obj.app.name}
            labels = []
            labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}])
            svc_temp['spec']['selector'] = {i['name']: i['value'] for i in labels}
            return True, svc_temp
    return False, None


def harbor_cli(namespace, **filters):
    try:
        harbor = SystemConfig.objects.filter(**filters).first()
        # fetch the Harbor configuration
        harbor_config = json.loads(harbor.config)
    except BaseException as e:
        logger.exception(f'Failed to create task, reason: Harbor registry exception, {e}')
        return False, f"Harbor registry exception: {e}"
    # create the Harbor project before building
    cli = HarborAPI(url=harbor_config['url'], username=harbor_config['user'],
                    password=harbor_config['password'])
    try:
        cli.create_project(namespace, public=harbor_config.get('public', False))
    except BaseException as e:
        pass
    return True, harbor_config


def k8s_cli(k8s, k8s_config):
    try:
        if k8s_config['type'] == 'basic':
            # basic auth or token auth
            k8s_config.pop('config', None)
            k8s_config.pop('type', None)
            cli = K8sAPI(**k8s_config)
        else:
            eks = None
            eks_token = None
            k8s_config = yaml.safe_load(k8s_config['config'])
            if k8s.idc.type == 1 and k8s.idc.supplier.split('.')[-1] == 'aws':
                return False, 'not supported.'
            cli = K8sAPI(k8s_config=k8s_config, api_key=eks_token, eks=eks)
        return True, cli
    except BaseException as e:
        return False, str(e)


def template_generate(appinfo_obj: AppInfo, image=None, partial_deploy_replicas: int = 0):
    """
    Generate Kubernetes Deployment YAML
    """

    def health_lifecycle_generate(item, enable=True):
        _c = {}
        for i in template[item]['data']:
            _x = {}
            if i.get('enable', enable):
                for j in i['items']:
                    if '__' in j['name']:
                        _t = j['name'].split('__')
                        _value = j['value']
                        if j['name'] == 'exec__command':
                            _value = ["sh", "-c", j['value']]
                        if _x.get(_t[0], None):
                            _x[_t[0]][_t[1]] = _value
                        else:
                            _x[_t[0]] = {_t[1]: _value}
                    else:
                        _x[j['name']] = j['value']
            _c[i['name']] = _x
        return _c

    def container_generate(container_data):
        containers = []
        for i in container_data:
            if i.get('enable', None):
                container = get_datadict(i['key'], config=1)
                if not container:
                    container = i['extra']
                containers.append(container)
        return containers

    language_obj = DevLanguage.objects.get(name=appinfo_obj.app.language)
    project_config = ProjectConfig.objects.filter(project_id=appinfo_obj.app.project.id,
                                                  environment_id=appinfo_obj.environment.id)
    namespace = appinfo_obj.namespace
    harbor_config = get_redis_data('cicd-harbor')
    harbor_url = harbor_config['url'].split('://')[1]
    image = f"{harbor_url}/{image}"

    template = {}
    # Template priority:
    # app module -> app -> project -> environment
    if project_config.first():
        project_template = project_config.first().template
        for k, v in project_template.items():
            if v and isinstance(v, (dict,)):
                if v.get('custom', False) is False:
                    if appinfo_obj.environment.template.get(k, None):
                        template[k] = appinfo_obj.environment.template[k]
                else:
                    if project_template.get(k, None):
                        template[k] = project_template[k]

    microapp_template = appinfo_obj.app.template
    for k, v in microapp_template.items():
        if '_on' in k and v:
            _k = k.rstrip('_on')
            if microapp_template.get(_k, None):
                template[_k] = microapp_template[_k]

    use_host_network = False
    if appinfo_obj.template.get('userHostNetwork', 0):
        use_host_network = True
    for k, v in appinfo_obj.template.items():
        if v and isinstance(v, (dict,)):
            if v.get('custom', False) and appinfo_obj.template.get(k, None):
                template[k] = appinfo_obj.template[k]

    yaml_template = {'kind': 'Deployment', 'metadata': {}, 'spec':
                     {'strategy': {}, 'template': {'metadata': {}, 'spec':
                                                   {'containers': [{'ports': [{'containerPort': 8080}], 'resources': []}],
                                                    'imagePullSecrets': [{'name': 'loginharbor'}],
                                                    'terminationGracePeriodSeconds': 120}
                                                   }
                      }
                     }

    try:
        tz = appinfo_obj.app.project.product.region.extra['timezone']
    except BaseException as e:
        tz = 'Asia/Shanghai'
    try:
        if template.get('strategy', None):
            for i in template['strategy']['data']:
                if i['key'] in ['maxSurge', 'maxUnavailable']:
                    if yaml_template['spec']['strategy'].get('rollingUpdate', None) is None:
                        yaml_template['spec']['strategy']['rollingUpdate'] = {}
                    yaml_template['spec']['strategy']['rollingUpdate'][i['key']] = f"{i['value']}%"
                else:
                    yaml_template['spec'][i['key']] = i['value']
        _d = {}
        for i in template['resources']['data']:
            _t = i['key'].split('_')
            if _d.get(_t[0], None):
                _d[_t[0]][_t[1]] = f"{i['value']}{i['slot']}"
            else:
                _d[_t[0]] = {_t[1]: f"{i['value']}{i['slot']}"}
        yaml_template['spec']['template']['spec']['containers'][0]['resources'] = _d

        yaml_template['metadata']['name'] = appinfo_obj.app.name
        yaml_template['metadata']['namespace'] = namespace
        yaml_template['spec']['template']['spec']['containers'][0]['name'] = appinfo_obj.app.name
        yaml_template['spec']['template']['spec']['containers'][0]['image'] = image
        command = appinfo_obj.app.template.get(
            'command', None) or language_obj.labels.get('command', None)
        if command:
            if command.startswith('./'):
                yaml_template['spec']['template']['spec']['containers'][0]['command'] = [command]
            else:
                yaml_template['spec']['template']['spec']['containers'][0]['command'] = ['sh', '-c', command]

        # Priority: app module > app > preset > dev language
        labels = template['label']['data']
        labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}])
        yaml_template['spec']['template']['metadata']['labels'] = {i['name']: i['value'] for i in labels}
        yaml_template['spec']['template']['metadata']['labels']['status-app-name-for-ops-platform'] = appinfo_obj.app.name
        yaml_template['spec']['selector'] = {'matchLabels': {i['name']: i['value'] for i in labels}}

        selectors = template['selector']['data']
        yaml_template['spec']['template']['spec']['nodeSelector'] = {i['name']: i['value'] for i in selectors}

        if 'annotations' not in yaml_template['spec']['template']['metadata']:
            yaml_template['spec']['template']['metadata']['annotations'] = {}
        for i in template['prometheus']['data']:
            yaml_template['spec']['template']['metadata']['annotations'][f'prometheus.io/{i["name"]}'] = i['value']
        if 'prometheus.io/path' in yaml_template['spec']['template']['metadata']['annotations']:
            yaml_template['spec']['template']['metadata']['annotations']['prometheus.io/app_product'] = appinfo_obj.app.project.product.name
            yaml_template['spec']['template']['metadata']['annotations']['prometheus.io/app_env'] = appinfo_obj.environment.name
            yaml_template['spec']['template']['metadata']['annotations']['prometheus.io/app_project'] = appinfo_obj.app.project.name

        # Environment variables
        envs = [{'name': 'TZ', 'value': tz}]
        envs.extend(template['env']['data'])
        envs.extend([
            {'name': '_RESTART', 'value': datetime.now().strftime('%Y%m%d%H%M%S')},  # the _RESTART variable forces the deployment to update
            {'name': 'PRODUCT_NAME', 'value': appinfo_obj.app.project.product.name},
            {'name': 'PROJECT_NAME', 'value': appinfo_obj.app.project.name},
            {'name': 'APPNAME', 'value': appinfo_obj.app.name},
            {'name': 'APPID', 'value': appinfo_obj.app.appid},
            {'name': 'ENV', 'value': appinfo_obj.environment.name},
            {'name': 'POD_NAMESPACE', 'value': namespace}
        ])
        envs = list({i['name']: i for i in envs}.values())
        for i in envs:
            try:
                env_value = i.get('value', None)
                cmname = i.pop('cmname', None)
                cmkey = i.pop('cmkey', None)
                if env_value:
                    env_value = env_value.lstrip('"').rstrip('"').lstrip("'").rstrip("'")
                i.pop('value', None)
                i['name'] = i['name'].lstrip('"').rstrip('"').lstrip("'").rstrip("'")
                if i.get('valueFrom', None) == 'configMapKeyRef':
                    i['valueFrom'] = {'configMapKeyRef': {'name': cmname, 'key': cmkey}}
                else:
                    i['value'] = env_value
                    i['valueFrom'] = None
            except BaseException as e:
                pass
        yaml_template['spec']['template']['spec']['containers'][0]['env'] = envs

        if template.get('health', False):
            _d = health_lifecycle_generate('health', True)
            for k, v in _d.items():
                yaml_template['spec']['template']['spec']['containers'][0][k] = v
        if template.get('lifecycle', False):
            yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'] = {}
            _d = health_lifecycle_generate('lifecycle', False)
            for k, v in _d.items():
                yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'][k] = v

        _vo_mount = [{'mountPath': '/data/logs', 'name': 'logs', 'readOnly': False}]
        _volumes = [{'name': 'logs', 'type': 'Directory', 'hostPath': {
            'path': f'/data/{appinfo_obj.environment.name}-applogs/{appinfo_obj.app.project.name}/'}}]
        if template.get('storage', None):
            for k, v in template['storage']['data'].items():
                for i in v:
                    _x = {}
                    for m, n in i.items():
                        if isinstance(n, (str,)):
                            n = n.replace('${APPNAME}', appinfo_obj.app.name)
                        if '_' in m:
                            _t = m.split('_')
                            if _x.get(_t[0], None):
                                _x[_t[0]][_t[1]] = n
                            else:
                                _x[_t[0]] = {_t[1]: n}
                        else:
                            _x[m] = n
                    _t = {'mountPath': _x['mount'], 'name': _x['name'],
                          'readOnly': True if _x.get('mode', None) == 'ReadOnly' else False}
                    if _x.get('file', None):
                        _t['subPath'] = _x['configMap']['items'][0]['key']
                    _vo_mount.append(_t)
                    _mode = _x.pop('mode', None)
                    _x.pop('file', None)
                    _x.pop('mount', None)
                    if _x.get('configMap', None):
                        _x['configMap']['defaultMode'] = 0o600 if _mode == 'ReadOnly' else 0o755
                    _volumes.append(_x)
        yaml_template['spec']['template']['spec']['containers'][0]['volumeMounts'] = _vo_mount
        yaml_template['spec']['template']['spec']['volumes'] = _volumes
        if use_host_network:
            yaml_template['spec']['template']['spec']['hostNetwork'] = True
        partial_deploy_yaml_template = None

    except BaseException as e:
        logger.exception(f'generate yaml err {e.__class__} {e}')
        return {'ecode': 500, 'message': str(e)}

    # Handle multiple containers
    if appinfo_obj.template.get('containers_custom', None):
        containers = container_generate(appinfo_obj.template.get('containers', []))
    else:
        containers = container_generate(project_config.first().template.get('containers', []))
    yaml_template['spec']['template']['spec']['containers'].extend(containers)
    ret = {'ecode': 200, 'image': image, 'yaml': yaml_template}

    if partial_deploy_yaml_template:
        ret['partial_deploy_yaml'] = partial_deploy_yaml_template
    return ret


def get_members(obj):
    team_members = [j for i in obj.team_members.values() for j in i]
    return list(set(team_members))


def get_permission_from_role(request):
    try:
        perms = request.user.roles.values(
            'permissions__method',
        ).distinct()
        return [p['permissions__method'] for p in perms]
    except AttributeError:
        return []


def get_headers(request=None):
    """
    Function:       get_headers(self, request)
    Description:    To get all the headers from request
    """
    regex = re.compile('^HTTP_')
    return dict((regex.sub('', header), value) for (header, value)
                in request.META.items() if header.startswith('HTTP_'))


def mask_sensitive_data(data):
    """
    Hides sensitive keys specified in sensitive_keys settings.
    Loops recursively over nested dictionaries.
    """
    if hasattr(settings, 'DRF_API_LOGGER_EXCLUDE_KEYS'):
        if type(settings.DRF_API_LOGGER_EXCLUDE_KEYS) in (list, tuple):
SENSITIVE_KEYS.extend(settings.DRF_API_LOGGER_EXCLUDE_KEYS)
10
2023-12-13 03:09:32+00:00
24k
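Note on the record above: mask_sensitive_data() recurses over nested structures and hides values whose keys are in SENSITIVE_KEYS (the record's next_line extends that list from Django settings). A minimal sketch of the same idea; the key list and the '***' replacement token here are assumptions, not the repo's actual values.

SENSITIVE_KEYS = ['password', 'token', 'secret']

def mask_sensitive_data(data):
    """Replace values of sensitive keys, recursing into nested dicts and lists."""
    if isinstance(data, dict):
        return {
            key: '***' if key.lower() in SENSITIVE_KEYS else mask_sensitive_data(value)
            for key, value in data.items()
        }
    if isinstance(data, list):
        return [mask_sensitive_data(item) for item in data]
    return data

print(mask_sensitive_data({'user': 'bob', 'auth': {'token': 'abc', 'scopes': ['x']}}))
# -> {'user': 'bob', 'auth': {'token': '***', 'scopes': ['x']}}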
MarilynKeller/aitviewer-skel
aitviewer/scene/camera.py
[ { "identifier": "CONFIG", "path": "aitviewer/configuration.py", "snippet": "CONFIG = Configuration()" }, { "identifier": "Lines", "path": "aitviewer/renderables/lines.py", "snippet": "class Lines(Node):\n \"\"\"Render lines as cylinders or cones. Can render approx. 600k lines at 40 fp...
import os import joblib import numpy as np from abc import ABC, abstractmethod from trimesh.transformations import rotation_matrix from aitviewer.configuration import CONFIG as C from aitviewer.renderables.lines import Lines from aitviewer.renderables.meshes import Meshes from aitviewer.renderables.rigid_bodies import RigidBodies from aitviewer.scene.camera_utils import ( look_at, normalize, orthographic_projection, perspective_projection, ) from aitviewer.scene.node import Node from aitviewer.utils.decorators import hooked
21,087
self.projection_matrix = None self.view_matrix = None self.view_projection_matrix = None def get_projection_matrix(self): if self.projection_matrix is None: raise ValueError("update_matrices() must be called before to update the projection matrix") return self.projection_matrix def get_view_matrix(self): if self.view_matrix is None: raise ValueError("update_matrices() must be called before to update the view matrix") return self.view_matrix def get_view_projection_matrix(self): if self.view_projection_matrix is None: raise ValueError("update_matrices() must be called before to update the view-projection matrix") return self.view_projection_matrix @abstractmethod def update_matrices(self, width, height): pass @property @abstractmethod def position(self): pass @property @abstractmethod def forward(self): pass @property @abstractmethod def up(self): pass @property @abstractmethod def right(self): pass def gui(self, imgui): pass class Camera(Node, CameraInterface): """ A base camera object that provides rendering of a camera mesh and visualization of the camera frustum and coordinate system. Subclasses of this class must implement the CameraInterface abstract methods. """ def __init__( self, inactive_color=(0.5, 0.5, 0.5, 1), active_color=(0.6, 0.1, 0.1, 1), viewer=None, **kwargs, ): """Initializer :param inactive_color: Color that will be used for rendering this object when inactive :param active_color: Color that will be used for rendering this object when active :param viewer: The current viewer, if not None the gui for this object will show a button for viewing from this camera in the viewer """ super(Camera, self).__init__(icon="\u0084", gui_material=False, **kwargs) # Camera object geometry vertices = np.array( [ # Body [0, 0, 0], [-1, -1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, 1], # Triangle front [0.5, 1.1, 1], [-0.5, 1.1, 1], [0, 2, 1], # Triangle back [0.5, 1.1, 1], [-0.5, 1.1, 1], [0, 2, 1], ], dtype=np.float32, ) # Scale dimensions vertices[:, 0] *= 0.05 vertices[:, 1] *= 0.03 vertices[:, 2] *= 0.15 # Slide such that the origin is in front of the object vertices[:, 2] -= vertices[1, 2] * 1.1 # Reverse z since we use the opengl convention that camera forward is -z vertices[:, 2] *= -1 # Reverse x too to maintain a consistent triangle winding vertices[:, 0] *= -1 faces = np.array( [ [0, 1, 2], [0, 2, 4], [0, 4, 3], [0, 3, 1], [1, 3, 2], [4, 2, 3], [5, 6, 7], [8, 10, 9], ] ) self._active = False self.active_color = active_color self.inactive_color = inactive_color
# Copyright (C) 2023  ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos

def _transform_vector(transform, vector):
    """Apply affine transformation (4-by-4 matrix) to a 3D vector."""
    return (transform @ np.concatenate([vector, np.array([1])]))[:3]

def _transform_direction(transform, vector):
    """Apply affine transformation (4-by-4 matrix) to a 3D direction."""
    return (transform @ np.concatenate([vector, np.array([0])]))[:3]

class CameraInterface(ABC):
    """
    An abstract class which describes the interface expected by the viewer for using this object as a camera.
    """

    def __init__(self):
        self.projection_matrix = None
        self.view_matrix = None
        self.view_projection_matrix = None

    def get_projection_matrix(self):
        if self.projection_matrix is None:
            raise ValueError("update_matrices() must be called before to update the projection matrix")
        return self.projection_matrix

    def get_view_matrix(self):
        if self.view_matrix is None:
            raise ValueError("update_matrices() must be called before to update the view matrix")
        return self.view_matrix

    def get_view_projection_matrix(self):
        if self.view_projection_matrix is None:
            raise ValueError("update_matrices() must be called before to update the view-projection matrix")
        return self.view_projection_matrix

    @abstractmethod
    def update_matrices(self, width, height):
        pass

    @property
    @abstractmethod
    def position(self):
        pass

    @property
    @abstractmethod
    def forward(self):
        pass

    @property
    @abstractmethod
    def up(self):
        pass

    @property
    @abstractmethod
    def right(self):
        pass

    def gui(self, imgui):
        pass

class Camera(Node, CameraInterface):
    """
    A base camera object that provides rendering of a camera mesh and visualization of the camera frustum and coordinate system.
    Subclasses of this class must implement the CameraInterface abstract methods.
    """

    def __init__(
        self,
        inactive_color=(0.5, 0.5, 0.5, 1),
        active_color=(0.6, 0.1, 0.1, 1),
        viewer=None,
        **kwargs,
    ):
        """Initializer
        :param inactive_color: Color that will be used for rendering this object when inactive
        :param active_color:   Color that will be used for rendering this object when active
        :param viewer: The current viewer, if not None the gui for this object will show a button for viewing from this camera in the viewer
        """
        super(Camera, self).__init__(icon="\u0084", gui_material=False, **kwargs)

        # Camera object geometry
        vertices = np.array(
            [
                # Body
                [0, 0, 0],
                [-1, -1, 1],
                [-1, 1, 1],
                [1, -1, 1],
                [1, 1, 1],
                # Triangle front
                [0.5, 1.1, 1],
                [-0.5, 1.1, 1],
                [0, 2, 1],
                # Triangle back
                [0.5, 1.1, 1],
                [-0.5, 1.1, 1],
                [0, 2, 1],
            ],
            dtype=np.float32,
        )

        # Scale dimensions
        vertices[:, 0] *= 0.05
        vertices[:, 1] *= 0.03
        vertices[:, 2] *= 0.15

        # Slide such that the origin is in front of the object
        vertices[:, 2] -= vertices[1, 2] * 1.1

        # Reverse z since we use the opengl convention that camera forward is -z
        vertices[:, 2] *= -1

        # Reverse x too to maintain a consistent triangle winding
        vertices[:, 0] *= -1

        faces = np.array(
            [
                [0, 1, 2],
                [0, 2, 4],
                [0, 4, 3],
                [0, 3, 1],
                [1, 3, 2],
                [4, 2, 3],
                [5, 6, 7],
                [8, 10, 9],
            ]
        )

        self._active = False
        self.active_color = active_color
        self.inactive_color = inactive_color
self.mesh = Meshes(
2
2023-12-07 16:13:50+00:00
24k
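Note on the record above: _transform_vector and _transform_direction rely on the standard homogeneous-coordinate trick, where points get w=1 (so translation applies) and directions get w=0 (so translation is ignored). A small numpy illustration; the transform below is an assumed example, not the record's camera matrix.

import numpy as np

def transform_point(transform, p):
    # w=1: the matrix's translation column affects the result
    return (transform @ np.append(p, 1.0))[:3]

def transform_direction(transform, d):
    # w=0: only the rotation/scale part of the matrix applies
    return (transform @ np.append(d, 0.0))[:3]

# A pure translation by (1, 2, 3): moves points, leaves directions unchanged.
T = np.eye(4)
T[:3, 3] = [1.0, 2.0, 3.0]
print(transform_point(T, np.array([0.0, 0.0, 0.0])))      # [1. 2. 3.]
print(transform_direction(T, np.array([0.0, 0.0, 1.0])))  # [0. 0. 1.]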
nexB/dejacode
component_catalog/models.py
[ { "identifier": "build_licensing", "path": "component_catalog/license_expression_dje.py", "snippet": "def build_licensing(licenses=None):\n \"\"\"\n Return a Licensing from `licenses`: either a License QuerySet or a\n pre-built Licensing object (which is returned as-is).\n \"\"\"\n if isi...
import logging import re from contextlib import suppress from urllib.parse import quote_plus from django.contrib.postgres.fields import ArrayField from django.core import validators from django.core.exceptions import MultipleObjectsReturned from django.core.exceptions import ObjectDoesNotExist from django.core.exceptions import ValidationError from django.core.validators import EMPTY_VALUES from django.db import models from django.db.models import CharField from django.db.models import Exists from django.db.models import OuterRef from django.db.models.functions import Concat from django.dispatch import receiver from django.template.defaultfilters import filesizeformat from django.utils.functional import cached_property from django.utils.html import format_html from django.utils.text import format_lazy from django.utils.text import get_valid_filename from django.utils.text import normalize_newlines from django.utils.translation import gettext_lazy as _ from attributecode.model import About from cyclonedx import model as cyclonedx_model from cyclonedx.model import component as cyclonedx_component from packageurl import PackageURL from packageurl.contrib import purl2url from packageurl.contrib import url2purl from packageurl.contrib.django.models import PackageURLMixin from packageurl.contrib.django.models import PackageURLQuerySetMixin from packageurl.contrib.django.utils import without_empty_values from component_catalog.license_expression_dje import build_licensing from component_catalog.license_expression_dje import get_license_objects from component_catalog.license_expression_dje import parse_expression from dejacode_toolkit import spdx from dejacode_toolkit.download import DataCollectionException from dejacode_toolkit.download import collect_package_data from dejacode_toolkit.purldb import PurlDB from dje import urn from dje.copier import post_copy from dje.copier import post_update from dje.fields import JSONListField from dje.fields import NoStripTextField from dje.models import DataspacedManager from dje.models import DataspacedModel from dje.models import DataspacedQuerySet from dje.models import ExternalReferenceMixin from dje.models import History from dje.models import HistoryFieldsMixin from dje.models import ParentChildModelMixin from dje.models import ParentChildRelationshipModel from dje.models import ReferenceNotesMixin from dje.tasks import tasks_logger from dje.utils import set_fields_from_object from dje.validators import generic_uri_validator from dje.validators import validate_url_segment from dje.validators import validate_version from license_library.models import License from license_library.models import LicenseChoice from policy.models import SetPolicyFromLicenseMixin from policy.models import UsagePolicyMixin from workflow.models import RequestMixin
18,940
self.license_expression,
                licenses=self.licensing,
                validate_known=False,
                validate_strict=False,
            )

    normalized_expression = cached_property(_get_normalized_expression)

    def get_license_expression(self, template="{symbol.key}", as_link=False, show_policy=False):
        """
        Validate and return the license_expression value set on this instance.
        The license expression is NOT validated for known symbols.
        Use the `template` format string to render each license in the expression.
        If `as_link` is True, render the expression as a link.
        """
        if self.license_expression:
            rendered = self.normalized_expression.render_as_readable(
                template,
                as_link=as_link,
                show_policy=show_policy,
            )
            return format_html(rendered)

    def get_license_expression_attribution(self):
        # note: the fields used in the template must be available as attributes or
        # properties on a License.
        template = '<a href="#license_{symbol.key}">{symbol.short_name}</a>'
        return self.get_license_expression(template)

    license_expression_attribution = cached_property(get_license_expression_attribution)

    def get_license_expression_linked(self):
        return self.get_license_expression(as_link=True)

    license_expression_linked = cached_property(get_license_expression_linked)

    def get_license_expression_linked_with_policy(self):
        license_expression = self.get_license_expression(as_link=True, show_policy=True)
        if license_expression:
            return format_html('<span class="license-expression">{}</span>', license_expression)

    def get_license_expression_spdx_id(self):
        """
        Return the license_expression formatted for SPDX compatibility.

        This includes a workaround for an SPDX spec limitation, where license
        exceptions that do not exist in the SPDX list cannot be provided as
        "LicenseRef-" in the "hasExtractedLicensingInfos".
        The current fix is to use AND rather than WITH for any exception that is
        a "LicenseRef-".

        See discussion at https://github.com/spdx/tools-java/issues/73
        """
        expression = self.get_license_expression("{symbol.spdx_id}")
        if expression:
            return expression.replace("WITH LicenseRef-", "AND LicenseRef-")

    def _get_primary_license(self):
        """
        Return the primary license key of this instance or None.
        The primary license is the left-most license of the expression.
        It can be the combination of a license WITH an exception and therefore
        may contain more than one key.

        WARNING: This does not support exception as primary_license.
        """
        if self.license_expression:
            licensing = build_licensing()
            return licensing.primary_license_key(self.license_expression)

    primary_license = cached_property(_get_primary_license)

    def save(self, *args, **kwargs):
        """
        Call the handle_assigned_licenses method on save, except during copy.
        During copy, as some Licenses referenced by the license_expression may
        not exist in the target Dataspace yet, handle_assigned_licenses()
        would not be able to create the proper assignments and the UUID of
        those assignments would not be shared with the reference Dataspace.
        Thus, handle_assigned_licenses() is skipped during the copy process
        and the License assignments are handled by the m2m copy.
        """
        super().save(*args, **kwargs)
        self.handle_assigned_licenses(copy=kwargs.get("copy"))

    def handle_assigned_licenses(self, copy=False):
        """
        Create missing AssignedLicense instances and delete the ones not
        referenced in the license_expression.

        In `copy` mode, all the license assignments are deleted to avoid any
        conflicts during the copy/update process where all the assignments are
        properly created.
        """
        licenses_field = self._meta.get_field("licenses")
        AssignedLicense = licenses_field.remote_field.through

        # Looking for the FK field name, on the AssignedLicense, that points to this Model
        fk_field_name = [
            field
            for field in AssignedLicense._meta.get_fields()
            if field.many_to_one and field.concrete and field.related_model == self.__class__
        ]

        if len(fk_field_name) != 1:
            return
        fk_field_name = fk_field_name[0].name

        assigned_license_qs = AssignedLicense.objects.filter(
            **{"dataspace": self.dataspace, fk_field_name: self}
        )

        if copy:
            # Deletes all existing license assignments to ensure UUID integrity
            # as the licenses will be properly assigned during the copy/update process
            assigned_license_qs.delete()
            return

        # Getting the full list of licenses is required here for proper
        # validation. We cannot rely on the assigned licenses since we
        # are modifying those assignments.
        all_licenses = License.objects.scope(self.dataspace).for_expression()
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#

logger = logging.getLogger("dje")

COMPONENT_PACKAGE_COMMON_FIELDS = [
    "copyright",
    "dependencies",
    "description",
    "holder",
    "homepage_url",
    "license_expression",
    "name",
    "notice_text",
    "primary_language",
    "release_date",
    "version",
]


def validate_filename(value):
    invalid_chars = ["/", "\\", ":"]
    if any(char in value for char in invalid_chars):
        raise ValidationError(
            _("Enter a valid filename: slash, backslash, or colon are not allowed.")
        )


class LicenseExpressionMixin:
    """Model mixin for models that store license expressions."""

    def _get_licensing(self):
        """Return a Licensing object built from the assigned licenses."""
        # WARNING: Do not apply select/prefetch_related here but on the main QuerySet instead
        # For example: prefetch_related('component_set__licenses__dataspace')
        return build_licensing(self.licenses.all())

    licensing = cached_property(_get_licensing)

    def _get_normalized_expression(self):
        """
        Return this object ``license_expression`` field value as a normalized
        parsed expression object.
        """
        if self.license_expression:
            return parse_expression(
                self.license_expression,
                licenses=self.licensing,
                validate_known=False,
                validate_strict=False,
            )

    normalized_expression = cached_property(_get_normalized_expression)

    def get_license_expression(self, template="{symbol.key}", as_link=False, show_policy=False):
        """
        Validate and return the license_expression value set on this instance.
        The license expression is NOT validated for known symbols.
        Use the `template` format string to render each license in the expression.
        If `as_link` is True, render the expression as a link.
        """
        if self.license_expression:
            rendered = self.normalized_expression.render_as_readable(
                template,
                as_link=as_link,
                show_policy=show_policy,
            )
            return format_html(rendered)

    def get_license_expression_attribution(self):
        # note: the fields used in the template must be available as attributes or
        # properties on a License.
        template = '<a href="#license_{symbol.key}">{symbol.short_name}</a>'
        return self.get_license_expression(template)

    license_expression_attribution = cached_property(get_license_expression_attribution)

    def get_license_expression_linked(self):
        return self.get_license_expression(as_link=True)

    license_expression_linked = cached_property(get_license_expression_linked)

    def get_license_expression_linked_with_policy(self):
        license_expression = self.get_license_expression(as_link=True, show_policy=True)
        if license_expression:
            return format_html('<span class="license-expression">{}</span>', license_expression)

    def get_license_expression_spdx_id(self):
        """
        Return the license_expression formatted for SPDX compatibility.

        This includes a workaround for an SPDX spec limitation, where license
        exceptions that do not exist in the SPDX list cannot be provided as
        "LicenseRef-" in the "hasExtractedLicensingInfos".
        The current fix is to use AND rather than WITH for any exception that is
        a "LicenseRef-".

        See discussion at https://github.com/spdx/tools-java/issues/73
        """
        expression = self.get_license_expression("{symbol.spdx_id}")
        if expression:
            return expression.replace("WITH LicenseRef-", "AND LicenseRef-")

    def _get_primary_license(self):
        """
        Return the primary license key of this instance or None.
        The primary license is the left-most license of the expression.
        It can be the combination of a license WITH an exception and therefore
        may contain more than one key.

        WARNING: This does not support exception as primary_license.
        """
        if self.license_expression:
            licensing = build_licensing()
            return licensing.primary_license_key(self.license_expression)

    primary_license = cached_property(_get_primary_license)

    def save(self, *args, **kwargs):
        """
        Call the handle_assigned_licenses method on save, except during copy.
        During copy, as some Licenses referenced by the license_expression may
        not exist in the target Dataspace yet, handle_assigned_licenses()
        would not be able to create the proper assignments and the UUID of
        those assignments would not be shared with the reference Dataspace.
        Thus, handle_assigned_licenses() is skipped during the copy process
        and the License assignments are handled by the m2m copy.
        """
        super().save(*args, **kwargs)
        self.handle_assigned_licenses(copy=kwargs.get("copy"))

    def handle_assigned_licenses(self, copy=False):
        """
        Create missing AssignedLicense instances and delete the ones not
        referenced in the license_expression.

        In `copy` mode, all the license assignments are deleted to avoid any
        conflicts during the copy/update process where all the assignments are
        properly created.
        """
        licenses_field = self._meta.get_field("licenses")
        AssignedLicense = licenses_field.remote_field.through

        # Looking for the FK field name, on the AssignedLicense, that points to this Model
        fk_field_name = [
            field
            for field in AssignedLicense._meta.get_fields()
            if field.many_to_one and field.concrete and field.related_model == self.__class__
        ]

        if len(fk_field_name) != 1:
            return
        fk_field_name = fk_field_name[0].name

        assigned_license_qs = AssignedLicense.objects.filter(
            **{"dataspace": self.dataspace, fk_field_name: self}
        )

        if copy:
            # Deletes all existing license assignments to ensure UUID integrity
            # as the licenses will be properly assigned during the copy/update process
            assigned_license_qs.delete()
            return

        # Getting the full list of licenses is required here for proper
        # validation. We cannot rely on the assigned licenses since we
        # are modifying those assignments.
        all_licenses = License.objects.scope(self.dataspace).for_expression()
licenses = get_license_objects(self.license_expression, all_licenses)
1
2023-12-07 16:57:42+00:00
24k
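Note on the record above: _get_primary_license() treats the left-most license key of the expression as primary, delegating to a Licensing helper. As a rough illustration of that "left-most license wins" rule only, here is a naive tokenizer-based sketch; it is an assumption-heavy stand-in, not the license_expression library the repo actually uses, and it ignores the WITH-exception subtlety the docstring warns about.

import re

def naive_primary_license_key(expression):
    """Return the first license key appearing in a license expression string."""
    # Split on parentheses/whitespace and skip the boolean operators.
    tokens = re.split(r'[\s()]+', expression)
    operators = {'and', 'or', 'with', ''}
    for token in tokens:
        if token.lower() not in operators:
            return token
    return None

print(naive_primary_license_key('(apache-2.0 OR mit) AND gpl-2.0'))  # apache-2.0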
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: in...
from dataclasses import dataclass from typing import Optional, Tuple, Union from torch import nn from torch.utils.checkpoint import checkpoint from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from .utils import to_2tuple import logging import math import numpy as np import torch import torch.nn.functional as F
17,555
mask_pooled_v2 = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True) if normalize: mask_pooled_v1 = F.normalize(mask_pooled_v1, dim=-1) mask_pooled_v2 = F.normalize(mask_pooled_v2, dim=-1) return mask_pooled_v1, mask_pooled_v2 def encode_masks(self, image, masks, normalize=True, mask_attn=False): return self._pool_masks(image, masks, normalize, mask_attn) def encode_text(self, text, normalize: bool = False): cast_dtype = self.transformer.get_cast_dtype() x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding.to(cast_dtype) x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x, attn_mask=self.attn_mask) x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection return F.normalize(x, dim=-1) if normalize else x def forward(self, image, text=None): image_features = self.encode_image(image, normalize=True) if text is None: text_features = None else: text_features = self.encode_text(text, normalize=True) if self.output_dict: return { "image_features": image_features, "text_features": text_features, "logit_scale": self.logit_scale.exp() } return image_features, text_features, self.logit_scale.exp() def train(self, mode: bool = True): if not isinstance(mode, bool): raise ValueError("training mode is expected to be boolean") self.training = mode for name, module in self.named_children(): if name == 'visual': if mode: logging.info(f'========Set module {name} as train mode========') else: logging.info(f'========Set module {name} as eval mode========') module.train(mode) else: logging.info(f'========Set module {name} as eval mode========') module.train(mode=False) return self class CustomTextCLIP(nn.Module): output_dict: torch.jit.Final[bool] def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, output_dict: bool = False, ): super().__init__() self.output_dict = output_dict self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True): self.text.lock(unlocked_layers, freeze_layer_norm) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) self.text.set_grad_checkpointing(enable) def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False): features = self.visual.extract_roi_features(image, normed_boxes) return F.normalize(features, dim=-1) if normalize else features def encode_image(self, image, normalize: bool = False): features = self.visual(image) return F.normalize(features, dim=-1) if normalize else features def encode_text(self, text, normalize: bool = False): features = self.text(text) return F.normalize(features, dim=-1) if normalize else features def forward(self, image, text): image_features = self.encode_image(image, normalize=True) if text is None: text_features = None else: text_features = 
self.encode_text(text, normalize=True) if self.output_dict: return { "image_features": image_features, "text_features": text_features, "logit_scale": self.logit_scale.exp() } return image_features, text_features, self.logit_scale.exp() def convert_weights_to_lp(model: nn.Module, dtype=torch.float16): """Convert applicable model parameters to low-precision (bf16 or fp16)""" def _convert_weights(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.to(dtype) if l.bias is not None: l.bias.data = l.bias.data.to(dtype)
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm visual = VisionTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, ls_init_value=vision_cfg.ls_init_value, patch_dropout=vision_cfg.patch_dropout, input_patchnorm=vision_cfg.input_patchnorm, global_average_pool=vision_cfg.global_average_pool, attentional_pool=vision_cfg.attentional_pool, n_queries=vision_cfg.n_queries, attn_pooler_heads=vision_cfg.attn_pooler_heads, output_tokens=vision_cfg.output_tokens, output_dim=embed_dim, act_layer=act_layer, norm_layer=norm_layer, ) return visual def _build_text_tower( embed_dim: int, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, ): if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) if text_cfg.hf_model_name: text = HFTextEncoder( text_cfg.hf_model_name, output_dim=embed_dim, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, pretrained=text_cfg.hf_model_pretrained, output_tokens=text_cfg.output_tokens, ) else: act_layer = QuickGELU if quick_gelu else nn.GELU norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm text = TextTransformer( context_length=text_cfg.context_length, vocab_size=text_cfg.vocab_size, width=text_cfg.width, heads=text_cfg.heads, layers=text_cfg.layers, ls_init_value=text_cfg.ls_init_value, output_dim=embed_dim, embed_cls=text_cfg.embed_cls, output_tokens=text_cfg.output_tokens, pad_id=text_cfg.pad_id, act_layer=act_layer, norm_layer=norm_layer, ) return text class CLIP(nn.Module): output_dict: torch.jit.Final[bool] def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, output_dict: bool = False, freeze_text=True, ): assert freeze_text, 'For now we must freeze text' super().__init__() self.output_dict = output_dict self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) if freeze_text: print(f'Freeze text encoder parameters', flush=True) for param in text.parameters(): param.requires_grad = False text.eval() self.transformer = text.transformer self.vocab_size = text.vocab_size self.embed_dim = embed_dim self.token_embedding = text.token_embedding self.positional_embedding = text.positional_embedding self.ln_final = text.ln_final self.text_projection = text.text_projection 
self.register_buffer('attn_mask', text.attn_mask, persistent=False) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs): self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) self.transformer.grad_checkpointing = enable def encode_image(self, image, normalize: bool = False): features = self.visual(image) return F.normalize(features, dim=-1) if normalize else features def encode_dense(self, image, normalize: bool = False, keep_shape=False): features = self.visual.encode_dense(image, keep_shape=keep_shape) if normalize: if keep_shape: features = F.normalize(features, dim=1) else: features = F.normalize(features, dim=-1) return features def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False, extract_type='v1'): features = self.visual.extract_roi_features(image, normed_boxes, extract_type=extract_type) if normalize: features = F.normalize(features, dim=-1) return features def _pool_masks(self, image, masks, normalize, mask_attn=False): if mask_attn: mask_pooled = self.visual.mask_attn_pool(image, masks) else: mask_pooled = self.visual.mask_pool(image, masks) if normalize: mask_pooled = F.normalize(mask_pooled, dim=-1) return mask_pooled def _pool_masks_v3(self, image, masks, normalize): mask_pooled_v1, x_dense = self.visual.mask_attn_pool(image, masks, return_dense=True) x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c x_dense = torch.repeat_interleave( x_dense, torch.tensor([len(m) for m in masks], device=x_dense.device), dim=0) masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w mask_pooled_v2 = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True) if normalize: mask_pooled_v1 = F.normalize(mask_pooled_v1, dim=-1) mask_pooled_v2 = F.normalize(mask_pooled_v2, dim=-1) return mask_pooled_v1, mask_pooled_v2 def encode_masks(self, image, masks, normalize=True, mask_attn=False): return self._pool_masks(image, masks, normalize, mask_attn) def encode_text(self, text, normalize: bool = False): cast_dtype = self.transformer.get_cast_dtype() x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding.to(cast_dtype) x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x, attn_mask=self.attn_mask) x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection return F.normalize(x, dim=-1) if normalize else x def forward(self, image, text=None): image_features = self.encode_image(image, normalize=True) if text is None: text_features = None else: text_features = self.encode_text(text, normalize=True) if self.output_dict: return { "image_features": image_features, "text_features": text_features, "logit_scale": self.logit_scale.exp() } return image_features, text_features, self.logit_scale.exp() def train(self, mode: bool = True): if not isinstance(mode, bool): raise ValueError("training mode is expected to be boolean") self.training = mode for name, module in self.named_children(): if name == 'visual': if mode: logging.info(f'========Set module {name} as train mode========') else: logging.info(f'========Set module {name} as eval mode========') module.train(mode) else: 
logging.info(f'========Set module {name} as eval mode========') module.train(mode=False) return self class CustomTextCLIP(nn.Module): output_dict: torch.jit.Final[bool] def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, output_dict: bool = False, ): super().__init__() self.output_dict = output_dict self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True): self.text.lock(unlocked_layers, freeze_layer_norm) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) self.text.set_grad_checkpointing(enable) def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False): features = self.visual.extract_roi_features(image, normed_boxes) return F.normalize(features, dim=-1) if normalize else features def encode_image(self, image, normalize: bool = False): features = self.visual(image) return F.normalize(features, dim=-1) if normalize else features def encode_text(self, text, normalize: bool = False): features = self.text(text) return F.normalize(features, dim=-1) if normalize else features def forward(self, image, text): image_features = self.encode_image(image, normalize=True) if text is None: text_features = None else: text_features = self.encode_text(text, normalize=True) if self.output_dict: return { "image_features": image_features, "text_features": text_features, "logit_scale": self.logit_scale.exp() } return image_features, text_features, self.logit_scale.exp() def convert_weights_to_lp(model: nn.Module, dtype=torch.float16): """Convert applicable model parameters to low-precision (bf16 or fp16)""" def _convert_weights(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.to(dtype) if l.bias is not None: l.bias.data = l.bias.data.to(dtype)
if isinstance(l, (nn.MultiheadAttention, Attention)):
6
2023-12-09 05:43:08+00:00
24k
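The CLIP variants above return L2-normalized image and text features plus a learned logit_scale initialized to log(1/0.07). As a hedged illustration of how that (image_features, text_features, logit_scale) triple is typically consumed downstream, here is a minimal self-contained sketch of the symmetric contrastive loss; the random tensors and the 512-dim embedding width are stand-ins, not values taken from the snippet.

```python
# Sketch only: how the outputs of the forward() methods above are commonly
# combined into a contrastive loss. Features are random stand-ins.
import torch
import torch.nn.functional as F

batch = 4
image_features = F.normalize(torch.randn(batch, 512), dim=-1)  # like encode_image(..., normalize=True)
text_features = F.normalize(torch.randn(batch, 512), dim=-1)   # like encode_text(..., normalize=True)
logit_scale = (torch.ones([]) * torch.log(torch.tensor(1 / 0.07))).exp()  # mirrors the nn.Parameter init

logits_per_image = logit_scale * image_features @ text_features.t()
labels = torch.arange(batch)  # matched image/text pairs lie on the diagonal
loss = (F.cross_entropy(logits_per_image, labels) +
        F.cross_entropy(logits_per_image.t(), labels)) / 2
print(float(loss))
```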
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields)...
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
16,888
14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 'DebugPlayerMapReviewResultOverrideSetMessage', 
22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage', 24101: OwnHomeDataMessage,
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage', 10212: ChangeAvatarNameMessage, 10309: 'GetAllianceInviteTokenMessage', 10321: 'AttributionEventMessage', 10401: 'CreateGameMessage', 10501: 'AcceptFriendMessage', 10502: 'AddFriendMessage', 10503: 'AskForAddableFriendsMessage', 10504: 'AskForFriendListMessage', 10506: 'RemoveFriendMessage', 10507: 'AddFriendByEmailMessage', 10509: 'AddFriendByAvatarNameAndCodeMessage', 10512: 'AskForPlayingGamecenterFriendsMessage', 10513: 'AskForPlayingFacebookFriendsMessage', 10514: 'AskForPlayingKakaoFriendsMessage', 10515: 'AskForPlayingTencentFriendsMessage', 10516: 'AskForPlayingLineFriendsMessage', 10517: 'AskForPlayingSupercellFriendsMessage', 10523: 'YoozooBillingRequestMessage', 10555: 'ClientInputMessage', 10576: 'SetBlockFriendRequestsMessage', 10599: 'AskForFriendSuggestionsMessage', 10636: 'SCIDBindAccountMessage', 11736: 'SCIDLogoutAllDevicesMessage', 12100: 'CreatePlayerMapMessage', 12101: 'DeletePlayerMapMessage', 12102: 'GetPlayerMapsMessage', 12103: 'UpdatePlayerMapMessage', 12104: 'SubmitPlayerMapMessage', 12105: 'PublishPlayerMapMessage', 12106: 'ChangePlayerMapNameMessage', 12107: 'EnterMapEditorMessage', 12108: 'GoHomeFromMapEditorMessage', 12110: 'TeamSetPlayerMapMessage', 12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 
'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 
'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage', 24101: OwnHomeDataMessage,
24104: OutOfSyncMessage,
12
2023-12-14 18:57:56+00:00
24k
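In the messagesList above, implemented packet IDs map to classes (e.g. ClientHelloMessage) while unimplemented ones map to bare name strings. The factory's dispatch methods are not shown in this row, so the sketch below is an assumed illustration of how such a mixed table can be consumed; ExamplePacket, messages_list, and create_message are hypothetical names.

```python
# Hypothetical dispatcher over a class-or-string message table like the one above.
class ExamplePacket:
    def __init__(self, message_data):
        self.message_data = message_data

messages_list = {10100: ExamplePacket, 10101: 'LoginMessage'}  # stand-in table

def create_message(message_type, message_data):
    entry = messages_list.get(message_type)
    if entry is None:
        return None             # unknown message id
    if isinstance(entry, str):
        return None             # id is known, handler not implemented yet
    return entry(message_data)  # instantiate the implemented packet class

print(create_message(10100, b'\x00'))  # ExamplePacket instance
print(create_message(10101, b'\x00'))  # None (string placeholder)
```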
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import os import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
19,146
print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB") datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader)) return datasets def build_dataloaders(cfg, settings): # Data transform transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05), tfm.RandomHorizontalFlip(probability=0.5)) transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip_Norm(probability=0.5), # tfm.RandomHorizontalFlip(probability=0.5), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) # The tracking pairs processing module output_sz = settings.output_sz search_area_factor = settings.search_area_factor data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_train, joint_transform=transform_joint, settings=settings) data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_val, joint_transform=transform_joint, settings=settings) # Train sampler and loader settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1) settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1) sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal") train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False) print("sampler_mode: ", sampler_mode) dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO, samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_train, frame_sample_mode=sampler_mode, train_cls=train_cls) train_sampler 
= DistributedSampler(dataset_train) if settings.local_rank != -1 else None shuffle = False if settings.local_rank != -1 else True loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=shuffle, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=train_sampler) # Validation samplers and loaders dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.VAL.DATASETS_RATIO, samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_val, frame_sample_mode=sampler_mode, train_cls=train_cls) val_sampler = DistributedSampler(dataset_val) if settings.local_rank != -1 else None loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=val_sampler, epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL) return loader_train, loader_val def get_optimizer_scheduler(net, cfg, settings): tracker_name = settings.script_name # Visual Encoder param_dicts = [ {"params": [p for n, p in net.named_parameters() if "backbone" not in n and p.requires_grad]}, { "params": [p for n, p in net.named_parameters() if "backbone" in n and p.requires_grad], "lr": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER, }, ]
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB") datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader)) return datasets def build_dataloaders(cfg, settings): # Data transform transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05), tfm.RandomHorizontalFlip(probability=0.5)) transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip_Norm(probability=0.5), # tfm.RandomHorizontalFlip(probability=0.5), 
tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) # The tracking pairs processing module output_sz = settings.output_sz search_area_factor = settings.search_area_factor data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_train, joint_transform=transform_joint, settings=settings) data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_val, joint_transform=transform_joint, settings=settings) # Train sampler and loader settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1) settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1) sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal") train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False) print("sampler_mode: ", sampler_mode) dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO, samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_train, frame_sample_mode=sampler_mode, train_cls=train_cls) train_sampler = DistributedSampler(dataset_train) if settings.local_rank != -1 else None shuffle = False if settings.local_rank != -1 else True loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=shuffle, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=train_sampler) # Validation samplers and loaders dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.VAL.DATASETS_RATIO, samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_val, frame_sample_mode=sampler_mode, train_cls=train_cls) val_sampler = DistributedSampler(dataset_val) if settings.local_rank != -1 else None loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=val_sampler, epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL) return loader_train, loader_val def get_optimizer_scheduler(net, cfg, settings): tracker_name = settings.script_name # Visual Encoder param_dicts = [ {"params": [p for n, p in net.named_parameters() if "backbone" not in n and p.requires_grad]}, { "params": [p for n, p in net.named_parameters() if "backbone" in n and p.requires_grad], "lr": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER, }, ]
if is_main_process():
14
2023-12-10 03:57:19+00:00
24k
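get_optimizer_scheduler() above splits parameters into two groups so that backbone weights train at LR * BACKBONE_MULTIPLIER while everything else uses the base LR. Below is a minimal runnable sketch of that grouping; the toy ModuleDict and the hyperparameter values are made up, standing in for cfg.TRAIN.LR and cfg.TRAIN.BACKBONE_MULTIPLIER.

```python
# Two-group learning-rate pattern, as in the param_dicts built above.
import torch

net = torch.nn.ModuleDict({
    'backbone': torch.nn.Linear(8, 8),  # names containing "backbone" get the reduced LR
    'head': torch.nn.Linear(8, 2),
})
base_lr, backbone_multiplier = 1e-4, 0.1  # stand-ins for cfg values

param_dicts = [
    {"params": [p for n, p in net.named_parameters()
                if "backbone" not in n and p.requires_grad]},
    {"params": [p for n, p in net.named_parameters()
                if "backbone" in n and p.requires_grad],
     "lr": base_lr * backbone_multiplier},
]
optimizer = torch.optim.AdamW(param_dicts, lr=base_lr, weight_decay=1e-4)
print([group["lr"] for group in optimizer.param_groups])  # [0.0001, 1e-05]
```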
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n ...
import argparse, os, sys, math, glob, logging, time import lumina.analyzer.checker.integrity_check as integrity_check import lumina.analyzer.checker.host_check as host_check import lumina.analyzer.checker.gbn_check as gbn_check import lumina.analyzer.checker.read_gbn_check as read_gbn_check import lumina.orchestrator.host as host import lumina.orchestrator.switch as switch from lumina.analyzer.main import get_qp_info_list from lumina.orchestrator.main import Orchestrator from lumina.analyzer.counter.switch_counter import SwitchCounter from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter from lumina.analyzer.pcap_processor.pcap_process import get_packet_list from lumina.analyzer.measurer.latency_measure import LatencyMeasure from lumina.utils.config_loggers import config_stream_handler, config_file_handler from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
15,057
logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" % (latency_measurement.get_qp_first_nack_before_retrans(pkt).get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': 
orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME)
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) 
nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" % (latency_measurement.get_qp_first_nack_before_retrans(pkt).get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = 
orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME)
switch_counter = SwitchCounter(switch_state_snapshot, port_map)
2
2023-12-09 08:21:14+00:00
24k
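run_traffic() above wraps each experiment iteration in a bounded retry loop, rerunning up to MAX_NB_EXP_RETRIES times before giving up. A standalone sketch of just that control flow; run_once stands in for the orchestrator calls and the 5-second backoff is shortened.

```python
# Bounded retry loop in the shape of run_traffic(); run_once is a stand-in.
import logging
import time

MAX_NB_EXP_RETRIES = 3

def run_once(i):
    return i > 0  # stand-in: iteration 0 always "fails", others succeed

def run_with_retries(i):
    nb_retry = 0
    while nb_retry < MAX_NB_EXP_RETRIES:
        if run_once(i) is False:
            # Multi-argument %-formatting must be applied to a tuple
            logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
            nb_retry += 1
            time.sleep(0)  # shortened stand-in for the 5 s backoff
            continue
        return True
    return False

logging.basicConfig(level=logging.INFO)
print([run_with_retries(i) for i in range(2)])  # [False, True]
```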
ebb-earl-co/tidal-wave
tidal_wave/main.py
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Andr...
from contextlib import closing from pathlib import Path from typing import Optional, Union from .login import login, AudioFormat, LogLevel from .album import Album from .artist import Artist from .mix import Mix from .playlist import Playlist from .track import Track from .video import Video from .models import ( match_tidal_url, TidalAlbum, TidalArtist, TidalMix, TidalPlaylist, TidalTrack, TidalVideo, ) from platformdirs import user_music_path from typing_extensions import Annotated import logging import typer
17,086
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[
Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo]
13
2023-12-12 21:50:25+00:00
24k
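main() above relies on Typer's Annotated parameters: a positional argument with help text, plus case-insensitive enum options whose defaults are supplied as the member's .value. A hedged, self-contained sketch of the same pattern; Quality, fetch, and the option names are invented for illustration.

```python
# Typer CLI pattern as in main() above: Annotated argument and enum option.
import enum

import typer
from typing_extensions import Annotated

app = typer.Typer()

class Quality(str, enum.Enum):
    low = "low"
    high = "high"

@app.command()
def fetch(
    url: Annotated[str, typer.Argument(help="Resource to download")],
    quality: Annotated[Quality, typer.Option(case_sensitive=False)] = Quality.high.value,
):
    typer.echo(f"{url} at {quality.value}")  # Typer coerces the string default to the enum

if __name__ == "__main__":
    app()
```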
ZS-YANG/FemtoDet-v3
mmdet/models/utils/misc.py
[ { "identifier": "SampleList", "path": "mmdet/structures/det_data_sample.py", "snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_insta...
from functools import partial from typing import List, Optional, Sequence, Tuple, Union from mmengine.structures import InstanceData from mmengine.utils import digit_version from six.moves import map, zip from torch import Tensor from torch.autograd import Function from torch.nn import functional as F from mmdet.structures import SampleList from mmdet.structures.bbox import BaseBoxes, get_box_type, stack_boxes from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import OptInstanceList import numpy as np import torch
18,160
(num_bboxes_filtered, ). - filtered_results (dict or list or Tensor, Optional): \ The filtered results. The shape of each item is \ (num_bboxes_filtered, N). """ valid_mask = scores > score_thr scores = scores[valid_mask] valid_idxs = torch.nonzero(valid_mask) num_topk = min(topk, valid_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) scores, idxs = scores.sort(descending=True) scores = scores[:num_topk] topk_idxs = valid_idxs[idxs[:num_topk]] keep_idxs, labels = topk_idxs.unbind(dim=1) filtered_results = None if results is not None: if isinstance(results, dict): filtered_results = {k: v[keep_idxs] for k, v in results.items()} elif isinstance(results, list): filtered_results = [result[keep_idxs] for result in results] elif isinstance(results, torch.Tensor): filtered_results = results[keep_idxs] else: raise NotImplementedError(f'Only supports dict or list or Tensor, ' f'but get {type(results)}.') return scores, labels, keep_idxs, filtered_results def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w def generate_coordinate(featmap_sizes, device='cuda'): """Generate the coordinate. Args: featmap_sizes (tuple): The feature to be calculated, of shape (N, C, W, H). device (str): The device where the feature will be put on. Returns: coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). """ x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) y, x = torch.meshgrid(y_range, x_range) y = y.expand([featmap_sizes[0], 1, -1, -1]) x = x.expand([featmap_sizes[0], 1, -1, -1]) coord_feat = torch.cat([x, y], 1) return coord_feat def levels_to_images(mlvl_tensor: List[torch.Tensor]) -> List[torch.Tensor]: """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. Args: mlvl_tensor (list[Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = stack_boxes(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def samplelist_boxtype2tensor(batch_data_samples: SampleList) -> SampleList: for data_samples in batch_data_samples: if 'gt_instances' in data_samples: bboxes = data_samples.gt_instances.get('bboxes', None)
# Copyright (c) OpenMMLab. All rights reserved. class SigmoidGeometricMean(Function): """Forward and backward function of geometric mean of two sigmoid functions. This implementation with analytical gradient function substitutes the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The original implementation incurs none during gradient backprapagation if both x and y are very small values. """ @staticmethod def forward(ctx, x, y): x_sigmoid = x.sigmoid() y_sigmoid = y.sigmoid() z = (x_sigmoid * y_sigmoid).sqrt() ctx.save_for_backward(x_sigmoid, y_sigmoid, z) return z @staticmethod def backward(ctx, grad_output): x_sigmoid, y_sigmoid, z = ctx.saved_tensors grad_x = grad_output * z * (1 - x_sigmoid) / 2 grad_y = grad_output * z * (1 - y_sigmoid) / 2 return grad_x, grad_y sigmoid_geometric_mean = SigmoidGeometricMean.apply def interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` to the shape of the `target`. The `source` must be a Tensor, but the `target` can be a Tensor or a np.ndarray with the shape (..., target_h, target_w). Args: source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or (N, C, H, W). target (Tensor | np.ndarray): The interpolation target with the shape (..., target_h, target_w). mode (str): Algorithm used for interpolation. The options are the same as those in F.interpolate(). Default: ``'bilinear'``. align_corners (bool): The same as the argument in F.interpolate(). Returns: Tensor: The interpolated source Tensor. """ assert len(target.shape) >= 2 def _interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` (4D) to the shape of the `target`.""" target_h, target_w = target.shape[-2:] source_h, source_w = source.shape[-2:] if target_h != source_h or target_w != source_w: source = F.interpolate( source, size=(target_h, target_w), mode=mode, align_corners=align_corners) return source if len(source.shape) == 3: source = source[:, None, :, :] source = _interpolate_as(source, target, mode, align_corners) return source[:, 0, :, :] else: return _interpolate_as(source, target, mode, align_corners) def unpack_gt_instances(batch_data_samples: SampleList) -> tuple: """Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based on ``batch_data_samples`` Args: batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple: - batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. - batch_gt_instances_ignore (list[:obj:`InstanceData`]): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. - batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
""" batch_gt_instances = [] batch_gt_instances_ignore = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'ignored_instances' in data_sample: batch_gt_instances_ignore.append(data_sample.ignored_instances) else: batch_gt_instances_ignore.append(None) return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then return ``instance_results`` Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. it currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to warp boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 if for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. 
    Args:
        func (Function): A function that will be applied to a list of
            arguments.

    Returns:
        tuple(list): A tuple containing multiple lists, each list contains \
            a kind of returned results by the function.
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))


def unmap(data, count, inds, fill=0):
    """Unmap a subset of items (data) back to the original set of items (of
    size count)."""
    if data.dim() == 1:
        ret = data.new_full((count, ), fill)
        ret[inds.type(torch.bool)] = data
    else:
        new_size = (count, ) + data.size()[1:]
        ret = data.new_full(new_size, fill)
        ret[inds.type(torch.bool), :] = data
    return ret


def mask2ndarray(mask):
    """Convert mask to ndarray.

    Args:
        mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
            torch.Tensor or np.ndarray): The mask to be converted.

    Returns:
        np.ndarray: Ndarray mask of shape (n, h, w) that has been converted.
    """
    if isinstance(mask, (BitmapMasks, PolygonMasks)):
        mask = mask.to_ndarray()
    elif isinstance(mask, torch.Tensor):
        mask = mask.detach().cpu().numpy()
    elif not isinstance(mask, np.ndarray):
        raise TypeError(f'Unsupported {type(mask)} data type')
    return mask


def flip_tensor(src_tensor, flip_direction):
    """Flip a tensor based on flip_direction.

    Args:
        src_tensor (Tensor): input feature map, shape (B, C, H, W).
        flip_direction (str): The flipping direction. Options are
            'horizontal', 'vertical', 'diagonal'.

    Returns:
        out_tensor (Tensor): Flipped tensor.
    """
    assert src_tensor.ndim == 4
    valid_directions = ['horizontal', 'vertical', 'diagonal']
    assert flip_direction in valid_directions
    if flip_direction == 'horizontal':
        out_tensor = torch.flip(src_tensor, [3])
    elif flip_direction == 'vertical':
        out_tensor = torch.flip(src_tensor, [2])
    else:
        out_tensor = torch.flip(src_tensor, [2, 3])
    return out_tensor


def select_single_mlvl(mlvl_tensors, batch_id, detach=True):
    """Extract a multi-scale single image tensor from a multi-scale batch
    tensor based on batch index.

    Note: The default value of detach is True, because the proposal gradient
    needs to be detached during the training of the two-stage model, e.g.,
    Cascade Mask R-CNN.

    Args:
        mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, each
            is a 4D-tensor.
        batch_id (int): Batch index.
        detach (bool): Whether to detach the gradient. Default True.

    Returns:
        list[Tensor]: Multi-scale single image tensor.
    """
    assert isinstance(mlvl_tensors, (list, tuple))
    num_levels = len(mlvl_tensors)

    if detach:
        mlvl_tensor_list = [
            mlvl_tensors[i][batch_id].detach() for i in range(num_levels)
        ]
    else:
        mlvl_tensor_list = [
            mlvl_tensors[i][batch_id] for i in range(num_levels)
        ]
    return mlvl_tensor_list


def filter_scores_and_topk(scores, score_thr, topk, results=None):
    """Filter results using score threshold and topk candidates.

    Args:
        scores (Tensor): The scores, shape (num_bboxes, K).
        score_thr (float): The score filter threshold.
        topk (int): The number of topk candidates.
        results (dict or list or Tensor, Optional): The results to which
            the filtering rule is to be applied. The shape of each item is
            (num_bboxes, N).

    Returns:
        tuple: Filtered results

            - scores (Tensor): The scores after being filtered, \
                shape (num_bboxes_filtered, ).
            - labels (Tensor): The class labels, shape \
                (num_bboxes_filtered, ).
            - anchor_idxs (Tensor): The anchor indexes, shape \
                (num_bboxes_filtered, ).
            - filtered_results (dict or list or Tensor, Optional): \
                The filtered results. The shape of each item is \
                (num_bboxes_filtered, N).
""" valid_mask = scores > score_thr scores = scores[valid_mask] valid_idxs = torch.nonzero(valid_mask) num_topk = min(topk, valid_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) scores, idxs = scores.sort(descending=True) scores = scores[:num_topk] topk_idxs = valid_idxs[idxs[:num_topk]] keep_idxs, labels = topk_idxs.unbind(dim=1) filtered_results = None if results is not None: if isinstance(results, dict): filtered_results = {k: v[keep_idxs] for k, v in results.items()} elif isinstance(results, list): filtered_results = [result[keep_idxs] for result in results] elif isinstance(results, torch.Tensor): filtered_results = results[keep_idxs] else: raise NotImplementedError(f'Only supports dict or list or Tensor, ' f'but get {type(results)}.') return scores, labels, keep_idxs, filtered_results def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w def generate_coordinate(featmap_sizes, device='cuda'): """Generate the coordinate. Args: featmap_sizes (tuple): The feature to be calculated, of shape (N, C, W, H). device (str): The device where the feature will be put on. Returns: coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). """ x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) y, x = torch.meshgrid(y_range, x_range) y = y.expand([featmap_sizes[0], 1, -1, -1]) x = x.expand([featmap_sizes[0], 1, -1, -1]) coord_feat = torch.cat([x, y], 1) return coord_feat def levels_to_images(mlvl_tensor: List[torch.Tensor]) -> List[torch.Tensor]: """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. Args: mlvl_tensor (list[Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = stack_boxes(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def samplelist_boxtype2tensor(batch_data_samples: SampleList) -> SampleList: for data_samples in batch_data_samples: if 'gt_instances' in data_samples: bboxes = data_samples.gt_instances.get('bboxes', None)
if isinstance(bboxes, BaseBoxes):
1
2023-12-11 15:23:03+00:00
24k
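The `multi_apply` helper in the record above is the backbone of per-level target computation in this file. A minimal, self-contained sketch of its map/zip behavior (the toy `add_and_scale` function and its inputs are illustrative, not from the source):

# Sketch of the multi_apply pattern: apply a function to per-level inputs,
# then transpose the per-call tuples into per-output lists.
from functools import partial

def multi_apply(func, *args, **kwargs):
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))

def add_and_scale(x, y, scale=1):  # hypothetical per-level function
    return (x + y) * scale, x * y

sums, prods = multi_apply(add_and_scale, [1, 2, 3], [4, 5, 6], scale=2)
assert sums == [10, 14, 18] and prods == [4, 10, 18]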
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n ...
import itertools import torch import torch.nn as nn from dataclasses import dataclass from diffusers import DDIMScheduler from einops import rearrange from omegaconf import OmegaConf from ldm.lora import ( inject_trainable_lora_extended, monkeypatch_remove_lora, save_lora_weight, ) from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import load_model_from_config from util.pose import make_T from util.typing import * from util.util import default
14,862
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32
self.model: LatentDiffusion = load_model_from_config(
4
2023-12-17 12:45:38+00:00
24k
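The `min_step_percent`/`max_step_percent` fields in the Config above follow the usual score-distillation convention of clamping the sampled diffusion timestep away from both ends of the schedule. A sketch of that convention only; the 1000-step scheduler length and variable names below are assumptions, not taken from this file:

import torch

num_train_timesteps = 1000  # assumed scheduler length, typical for LDM setups
min_step_percent, max_step_percent = 0.02, 0.98  # mirrors the Config defaults above

min_step = int(num_train_timesteps * min_step_percent)
max_step = int(num_train_timesteps * max_step_percent)

# Sample per-example timesteps restricted to [min_step, max_step], avoiding the
# nearly-clean and nearly-pure-noise ends of the schedule during optimization.
batch_size = 4
t = torch.randint(min_step, max_step + 1, (batch_size,), dtype=torch.long)
assert t.min() >= min_step and t.max() <= max_step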
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n ...
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
14,596
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1
target[:cur_len] = IGNORE_INDEX
2
2023-12-15 14:58:24+00:00
24k
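The heart of `collate_fn` above is right-padding variable-length token sequences and deriving the attention mask by comparing against the pad id. A small standalone sketch of just that step (the toy ids and pad value are illustrative):

import torch
from torch.nn.utils.rnn import pad_sequence

pad_token_id = 0  # illustrative; the real value comes from the tokenizer
input_ids = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]

# Right-pad to the longest sequence in the batch, as collate_fn does above.
batch = pad_sequence(input_ids, batch_first=True, padding_value=pad_token_id)
attention_masks = batch.ne(pad_token_id)

assert batch.tolist() == [[5, 6, 7], [8, 9, 0]]
assert attention_masks.tolist() == [[True, True, True], [True, True, False]]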
sinoyou/nelf-pro
nerfstudio/viewer/server/viewer_utils.py
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the fo...
import base64 import enum import os import sys import threading import time import warnings import cv2 import numpy as np import torch from pathlib import Path from typing import Any, Dict, Optional, Tuple from cryptography.utils import CryptographyDeprecationWarning from rich.console import Console from nerfstudio.cameras.cameras import Cameras from nerfstudio.cameras.rays import RayBundle from nerfstudio.configs import base_config as cfg from nerfstudio.data.datasets.base_dataset import InputDataset from nerfstudio.models.base_model import Model from nerfstudio.utils import colormaps, profiler, writer from nerfstudio.utils.decorators import check_main_thread, decorate_all from nerfstudio.utils.images import BasicImages from nerfstudio.utils.io import load_from_json, write_to_json from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h from nerfstudio.viewer.server.visualizer import Viewer
19,284
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Code to interface with the `vis/` (the JS viewer)."""
from __future__ import annotations

warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)

CONSOLE = Console(width=120)


def get_viewer_version() -> str:
    """Get the version of the viewer."""
    json_filename = os.path.join(os.path.dirname(__file__), "../app/package.json")
    version = load_from_json(Path(json_filename))["version"]
    return version


@check_main_thread
def setup_viewer(config: cfg.ViewerConfig, log_filename: Path):
    """Sets up the viewer if enabled.

    Args:
        config: the configuration to instantiate viewer
    """
    viewer_state = ViewerState(config, log_filename=log_filename)
    banner_messages = [f"Viewer at: {viewer_state.viewer_url}"]
    return viewer_state, banner_messages


class OutputTypes(str, enum.Enum):
    """Noncomprehensive list of output render types."""

    INIT = "init"
    RGB = "rgb"
    RGB_FINE = "rgb_fine"
    ACCUMULATION = "accumulation"
    ACCUMULATION_FINE = "accumulation_fine"


class ColormapTypes(str, enum.Enum):
    """Noncomprehensive list of colormap render types."""

    INIT = "init"
    DEFAULT = "default"
    TURBO = "turbo"
    DEPTH = "depth"
    SEMANTIC = "semantic"
    BOOLEAN = "boolean"


class IOChangeException(Exception):
    """Basic camera exception to interrupt viewer."""


class SetTrace:
    """Basic trace function."""

    def __init__(self, func):
        self.func = func

    def __enter__(self):
        sys.settrace(self.func)
        return self

    def __exit__(self, ext_type, exc_value, traceback):
        sys.settrace(None)


class RenderThread(threading.Thread):
    """Thread that does all the rendering calls while listening for interrupts.

    Args:
        state: current viewer state object
        graph: current checkpoint of model
        camera_ray_bundle: input rays to pass through the graph to render out
    """
def __init__(self, state: "ViewerState", graph: Model, camera_ray_bundle: RayBundle):
4
2023-12-15 20:07:22+00:00
24k
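`SetTrace` plus `IOChangeException` in the record above implement cooperative interruption of a long render: a trace function installed via `sys.settrace` raises inside the traced frame when state changes. A minimal sketch of that mechanism, independent of nerfstudio (the `should_interrupt` flag stands in for "camera moved" state):

import sys

class IOChangeException(Exception):
    """Raised by the trace function to abort the traced work."""

class SetTrace:
    def __init__(self, func):
        self.func = func
    def __enter__(self):
        sys.settrace(self.func)
        return self
    def __exit__(self, ext_type, exc_value, traceback):
        sys.settrace(None)

should_interrupt = True  # illustrative stand-in for viewer state

def check_interrupt(frame, event, arg):
    # Raising on a 'line' event aborts whatever code is currently executing.
    if event == "line" and should_interrupt:
        raise IOChangeException
    return check_interrupt

def long_render():
    for _ in range(10**6):
        pass  # rendering work we want to be able to abort

try:
    with SetTrace(check_interrupt):
        long_render()
except IOChangeException:
    print("render interrupted")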
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.d...
from data.dataloader_Fishbowl import FishBowl from data.dataloader_MOViD_A import MOViD_A from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21,229
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl":
train_dataset = FishBowl(config, mode='train')
0
2023-12-21 04:25:47+00:00
24k
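The `load_dataset` dispatcher above grows one `elif` branch per dataset. A common alternative, sketched here under the assumption that each dataset class keeps the `(config, mode)` constructor shown, is a name-to-class registry; the classes below are hypothetical stand-ins, not the real ones:

# Hypothetical registry refactor of the if/elif dispatch in load_dataset.
class KinsFusion:
    def __init__(self, config, mode):
        self.config, self.mode = config, mode

class CocoaFusion:
    def __init__(self, config, mode):
        self.config, self.mode = config, mode

DATASETS = {"KINS": KinsFusion, "COCOA": CocoaFusion}

def load_dataset(config, dataset_name, mode="train"):
    try:
        cls = DATASETS[dataset_name]
    except KeyError:
        raise ValueError(f"unknown dataset: {dataset_name!r}")
    return cls(config, mode=mode)

ds = load_dataset({"root_path": "/data"}, "KINS", mode="train")
assert isinstance(ds, KinsFusion)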
alipay/PainlessInferenceAcceleration
pia/lookahead/models/baichuan/modeling_baichuan.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = False\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n d...
import math import os import torch import torch.utils.checkpoint from contextlib import contextmanager from threading import Thread from typing import List, Optional, Tuple, Union from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from transformers import PretrainedConfig from transformers.activations import ACT2FN from transformers.generation.utils import GenerationConfig from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.utils import logging, ContextManagers from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel from pia.lookahead.models.baichuan.configuration_baichuan import BaichuanConfig from pia.lookahead.models.baichuan.generation_utils import build_chat_input, TextIterStreamer from xformers import ops as xops from .quantizer import quantize_offline, init_model_weight_int4 from .quantizer import init_model_weight_int4 from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map from accelerate.utils import CustomDtype from accelerate.utils import get_balanced_memory from .quantizer import quantize_online
17,578
return model return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args, config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, use_safetensors=use_safetensors, **kwargs) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) softmax_normalizer = shift_logits.max(-1).values ** 2 z_loss = self.config.z_loss_weight * softmax_normalizer.mean() # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) + z_loss if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in 
layer_past),)
        return reordered_past

    def quantize(self, bits: int):
        try:
            from .quantizer import quantize_online
        except ImportError:
            raise ImportError(f"Needs QLinear to run quantize.")
        return quantize_online(self, bits)

    def chat(self, tokenizer, messages: List[dict], stream=False,
             generation_config: Optional[GenerationConfig] = None):
        generation_config = generation_config or self.generation_config
        input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)
        if stream:
# Copyright 2023 Baichuan Inc. All Rights Reserved.

# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

logger = logging.get_logger(__name__)

try:
    from xformers import ops as xops
except ImportError:
    xops = None
    logger.warning(
        "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers."
    )


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
        input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """Make causal mask used for bi-directional self-attention."""
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
""" if len(mask.size()) == 3: bsz, src_len, _ = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, :, :].expand(bsz, 1, tgt_len, src_len).to(dtype) else: bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class RotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.max_seq_len_cached = max_position_embeddings t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32).to(x.device) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32).to(x.device) elif self.cos_cached.device != x.device: self.cos_cached = self.cos_cached.to(x.device) self.sin_cached = self.sin_cached.to(x.device) return ( self.cos_cached[:, :, :seq_len, ...], self.sin_cached[:, :, :seq_len, ...], ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids): cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin) k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin) return q_embed.to(q.dtype), k_embed.to(k.dtype) class MLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: BaichuanConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() proj = self.W_pack(hidden_states) proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask() ) else: with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class DecoderLayer(nn.Module): def __init__(self, config: BaichuanConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Attention(config=config) self.mlp = MLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, 
position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class BaichuanPreTrainedModel(LookaheadPreTrainedModel): config_class = BaichuanConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["DecoderLayer"] _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, BaichuanModel): module.gradient_checkpointing = value class BaichuanModel(BaichuanPreTrainedModel): def __init__(self, config: BaichuanConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = 
return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions # Note: adapt for lookahead if attention_mask is not None and len(attention_mask.shape) == 4: # lookahead # attention_mask: [bs, 1, src_len, tgt_len] position_ids = torch.sum(attention_mask, dim=-1).squeeze(1) - 1 attention_mask = (1.0-attention_mask.to(inputs_embeds.dtype)) * torch.finfo(inputs_embeds.dtype).min else: # non-lookahead if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class NormHead(nn.Module):
    def __init__(self, hidden_size, vocab_size, bias=False):
        super().__init__()
        self.weight = nn.Parameter(torch.empty((vocab_size, hidden_size)))
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        self.first_flag = True

    def forward(self, hidden_states):
        if self.training:
            norm_weight = nn.functional.normalize(self.weight)
        elif self.first_flag:
            self.first_flag = False
            self.weight = nn.Parameter(nn.functional.normalize(self.weight))
            norm_weight = self.weight
        else:
            norm_weight = self.weight
        return nn.functional.linear(hidden_states, norm_weight)


_init_weights = True


@contextmanager
def no_init_weights(_enable=True):
    global _init_weights
    old_init_weights = _init_weights
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
        _init_weights = old_init_weights


class BaichuanForCausalLM(BaichuanPreTrainedModel):
    def __init__(self, config, *model_args, **model_kwargs):
        super().__init__(config)
        self.model = BaichuanModel(config)
        self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
        if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
            try:
                from .quantizer import quantize_offline, init_model_weight_int4
            except ImportError:
                raise ImportError(f"Needs QLinear to run quantize.")
            quantize_offline(self, 4)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @classmethod
    def from_pretrained(
            cls,
            pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
            *model_args,
            config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
            cache_dir: Optional[Union[str, os.PathLike]] = None,
            ignore_mismatched_sizes: bool = False,
            force_download: bool =
            False,
            local_files_only: bool = False,
            token: Optional[Union[str, bool]] = None,
            revision: str = "main",
            use_safetensors: bool = None,
            **kwargs,
    ):
        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=False,
                proxies=None,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder="",
                _from_auto=False,
                _from_pipeline=None,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
            try:
                from .quantizer import init_model_weight_int4
                from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map
                from accelerate.utils import CustomDtype
                from accelerate.utils import get_balanced_memory
            except ImportError:
                raise ImportError(f"Needs import model weight init func to run quantize.")
            # Instantiate model.
            init_contexts = [no_init_weights(_enable=True)]
            init_contexts.append(init_empty_weights())
            with ContextManagers(init_contexts):
                model = cls(config)

            model_file = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin')
            state_dict = torch.load(model_file, map_location="cpu")
            model.is_quantized = True

            device_map = kwargs.pop("device_map", None)
            torch_dtype = kwargs.pop("torch_dtype", None)

            kwargs = {"no_split_module_classes": model._no_split_modules}
            target_dtype = CustomDtype.INT4
            max_memory = get_balanced_memory(
                model,
                dtype=target_dtype,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=None,
                **kwargs,
            )
            kwargs["max_memory"] = max_memory

            device_map = infer_auto_device_map(model, dtype=target_dtype, **kwargs)
            model = init_model_weight_int4(config, model, state_dict)

            # Set model in evaluation mode to deactivate DropOut modules by default
            model.eval()
            # If it is a model with generation capabilities, attempt to load the generation config
            if model.can_generate():
                try:
                    model.generation_config = GenerationConfig.from_pretrained(
                        pretrained_model_name_or_path,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=False,
                        proxies=None,
                        local_files_only=local_files_only,
                        token=token,
                        revision=revision,
                        subfolder="",
                        _from_auto=False,
                        _from_pipeline=None,
                        **kwargs,
                    )
                except (OSError, TypeError):
                    logger.info(
                        "Generation config file not found, using a generation config created from the model config."
) pass if device_map is not None: dispatch_model(model, device_map=device_map) return model return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args, config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, use_safetensors=use_safetensors, **kwargs) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) softmax_normalizer = shift_logits.max(-1).values ** 2 z_loss = self.config.z_loss_weight * softmax_normalizer.mean() # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) + z_loss if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: 
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past

    def quantize(self, bits: int):
        try:
            from .quantizer import quantize_online
        except ImportError:
            raise ImportError(f"Needs QLinear to run quantize.")
        return quantize_online(self, bits)

    def chat(self, tokenizer, messages: List[dict], stream=False,
             generation_config: Optional[GenerationConfig] = None):
        generation_config = generation_config or self.generation_config
        input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)
        if stream:
streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
3
2023-12-19 13:11:38+00:00
24k
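`RotaryEmbedding` and `apply_rotary_pos_emb` in the record above implement standard RoPE. A compact sanity sketch (shapes simplified relative to the 4D tensors the model uses) showing the construction of the cos/sin tables and the identity rotation at position 0:

import torch

def rotate_half(x):
    # Same helper as in the record above: swap-and-negate the two halves.
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)

dim, seq_len = 8, 4
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
t = torch.arange(seq_len, dtype=torch.float32)
freqs = torch.outer(t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, dim)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(seq_len, dim)
q_rot = q * cos + rotate_half(q) * sin

# Position 0 has cos=1 and sin=0, so the rotation there must be the identity.
assert torch.allclose(q_rot[0], q[0])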
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig from omegaconf.listconfig import ListConfig
19,720
""" Convert the torso of the model to float32. """ self.conv1.apply(convert_module_to_f32) self.conv2.apply(convert_module_to_f32) self.conv3.apply(convert_module_to_f32) self.conv4.apply(convert_module_to_f32) self.proj.apply(convert_module_to_f32) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.proj(x) return x class AnimateLDM(LatentDiffusion): def __init__(self, reference_stage_config, pose_guider_config, target_key, reference_key, skeleton_key, *args, **kwargs): super().__init__(*args, **kwargs) self.reference_model = instantiate_from_config(reference_stage_config) self.pose_model = instantiate_from_config(pose_guider_config) self.target_key = target_key self.reference_key = reference_key self.skeleton_key = skeleton_key self.animate_scales = [1.0] * 13 @torch.no_grad() def get_input(self, batch, k, bs=None, *args, **kwargs): x, _ = super().get_input(batch, self.target_key, *args, **kwargs) ref_x, ref_c = super().get_input(batch, self.reference_key, *args, **kwargs) reference = batch[self.reference_key] skeleton = batch[self.skeleton_key] if bs is not None: reference = reference[:bs] skeleton = skeleton[:bs] reference = reference.to(self.device) skeleton = skeleton.to(self.device) reference = einops.rearrange(reference, 'b h w c -> b c h w') skeleton = einops.rearrange(skeleton, 'b h w c -> b c h w') reference = reference.to(memory_format=torch.contiguous_format).float() skeleton = skeleton.to(memory_format=torch.contiguous_format).float() return x, dict(c_crossattn=[ref_c], c_concat=None, img_skeleton=skeleton, img_reference=reference, latent_reference=ref_x) def apply_model(self, x_noisy, t, cond, *args, **kwargs): assert isinstance(cond, dict) diffusion_model = self.model.diffusion_model cond_img = cond['c_crossattn'] refs = self.reference_model(x=cond['latent_reference'], timesteps=t, context=cond_img) pose = self.pose_model(x=cond['img_skeleton']) # control = [c * scale for c, scale in zip(control, self.control_scales)] eps = diffusion_model(x=x_noisy + pose, timesteps=t, context=cond_img, refs=refs) return eps @torch.no_grad() def get_unconditional_conditioning(self, N): return self.get_learned_conditioning([""] * N) @torch.no_grad() def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, cond = self.get_input(batch, self.target_key, bs=N) N = min(z.shape[0], N) n_row = min(z.shape[0], n_row) log["reconstruction"] = self.decode_first_stage(z) log["reference"] = cond["img_reference"] log["skeleton"] = cond["img_skeleton"] * 2.0 - 1.0 if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) 
log["diffusion_row"] = diffusion_grid if sample: # get denoise row # cond={"c_concat": [c_cat], "c_crossattn": [c_txt], "c_reference": [ref_x], "c_skeleton": [c_skt]} samples, z_denoise_row = self.sample_log(cond=cond, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if unconditional_guidance_scale > 1.0: samples_cfg, _ = self.sample_log(cond=cond, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg return log @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != 
len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. 
""" self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" refs = [] hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) # --------- input_block --------- for module in self.input_blocks: for sub_m in module: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformer): refs.append(h) # push features into refs before cross attention module h = sub_m(h, context) else: h = sub_m(h) hs.append(h) # --------- middle_block --------- for sub_m in self.middle_block: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformer): refs.append(h) # push features into refs before cross attention module h = sub_m(h, context) else: h = sub_m(h) # --------- output_block --------- for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) for sub_m in module: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformer): refs.append(h) # push features into refs before cross attention module h = sub_m(h, context) else: h = sub_m(h) return refs class ReferenceUNetModel(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_temporal_attention=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, frames=24, # temporal length ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: if isinstance(self.num_classes, int): self.label_emb = nn.Embedding(num_classes, time_embed_dim) elif self.num_classes == "continuous": print("setting up linear c_adm embedding layer") self.label_emb = nn.Linear(1, time_embed_dim) else: raise ValueError() self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, 
use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, refs=None, y=None, **kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. 
""" assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) if self.num_classes is not None: assert y.shape[0] == x.shape[0] emb = emb + self.label_emb(y) # -------- input_block ----------- h = x.type(self.dtype) for module in self.input_blocks: for sub_m in module: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformerPlus): # push features into refs before cross attention module h = sub_m(h, context, refs.pop(0)) else: h = sub_m(h) hs.append(h) # -------- middle_block ---------- for sub_m in self.middle_block: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformerPlus): # push features into refs before cross attention module h = sub_m(h, context, refs.pop(0)) else: h = sub_m(h) # -------- output_block ---------- for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) for sub_m in module: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformerPlus): # push features into refs before cross attention module h = sub_m(h, context, refs.pop(0)) else: h = sub_m(h) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) else: return self.out(h) class PoseGuider(nn.Module): def __init__(self, in_channels=3, out_channels=4, kernel_size=4, model_channels=[16, 32, 64, 128]): super().__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_channels, model_channels[0], kernel_size, 2, padding=1), nn.GroupNorm(16, model_channels[0]), nn.SiLU()) self.conv2 = nn.Sequential(nn.Conv2d(model_channels[0], model_channels[1], kernel_size, 2, padding=1), nn.GroupNorm(16, model_channels[1]), nn.SiLU()) self.conv3 = nn.Sequential(nn.Conv2d(model_channels[1], model_channels[2], kernel_size, 2, padding=1), nn.GroupNorm(16, model_channels[2]), nn.SiLU()) self.conv4 = nn.Sequential(nn.Conv2d(model_channels[2], model_channels[3], kernel_size, 1, padding=1), nn.GroupNorm(16, model_channels[3]), nn.SiLU()) self.proj = zero_module(nn.Conv2d(model_channels[3], out_channels, kernel_size, 1, padding=1)) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.conv1.apply(convert_module_to_f16) self.conv2.apply(convert_module_to_f16) self.conv3.apply(convert_module_to_f16) self.conv4.apply(convert_module_to_f16) self.proj.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. 
""" self.conv1.apply(convert_module_to_f32) self.conv2.apply(convert_module_to_f32) self.conv3.apply(convert_module_to_f32) self.conv4.apply(convert_module_to_f32) self.proj.apply(convert_module_to_f32) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.proj(x) return x class AnimateLDM(LatentDiffusion): def __init__(self, reference_stage_config, pose_guider_config, target_key, reference_key, skeleton_key, *args, **kwargs): super().__init__(*args, **kwargs) self.reference_model = instantiate_from_config(reference_stage_config) self.pose_model = instantiate_from_config(pose_guider_config) self.target_key = target_key self.reference_key = reference_key self.skeleton_key = skeleton_key self.animate_scales = [1.0] * 13 @torch.no_grad() def get_input(self, batch, k, bs=None, *args, **kwargs): x, _ = super().get_input(batch, self.target_key, *args, **kwargs) ref_x, ref_c = super().get_input(batch, self.reference_key, *args, **kwargs) reference = batch[self.reference_key] skeleton = batch[self.skeleton_key] if bs is not None: reference = reference[:bs] skeleton = skeleton[:bs] reference = reference.to(self.device) skeleton = skeleton.to(self.device) reference = einops.rearrange(reference, 'b h w c -> b c h w') skeleton = einops.rearrange(skeleton, 'b h w c -> b c h w') reference = reference.to(memory_format=torch.contiguous_format).float() skeleton = skeleton.to(memory_format=torch.contiguous_format).float() return x, dict(c_crossattn=[ref_c], c_concat=None, img_skeleton=skeleton, img_reference=reference, latent_reference=ref_x) def apply_model(self, x_noisy, t, cond, *args, **kwargs): assert isinstance(cond, dict) diffusion_model = self.model.diffusion_model cond_img = cond['c_crossattn'] refs = self.reference_model(x=cond['latent_reference'], timesteps=t, context=cond_img) pose = self.pose_model(x=cond['img_skeleton']) # control = [c * scale for c, scale in zip(control, self.control_scales)] eps = diffusion_model(x=x_noisy + pose, timesteps=t, context=cond_img, refs=refs) return eps @torch.no_grad() def get_unconditional_conditioning(self, N): return self.get_learned_conditioning([""] * N) @torch.no_grad() def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, cond = self.get_input(batch, self.target_key, bs=N) N = min(z.shape[0], N) n_row = min(z.shape[0], n_row) log["reconstruction"] = self.decode_first_stage(z) log["reference"] = cond["img_reference"] log["skeleton"] = cond["img_skeleton"] * 2.0 - 1.0 if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # 
get denoise row # cond={"c_concat": [c_cat], "c_crossattn": [c_txt], "c_reference": [ref_x], "c_skeleton": [c_skt]} samples, z_denoise_row = self.sample_log(cond=cond, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if unconditional_guidance_scale > 1.0: samples_cfg, _ = self.sample_log(cond=cond, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg return log @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
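The all_code field makes the hand-off between ReferenceNet and ReferenceUNetModel visible: the former appends a feature map right before every cross-attention site (refs.append(h)), the latter pops from the front at the matching sites (refs.pop(0)). Because both networks share the same block layout, the traversal order (input blocks -> middle -> output blocks) pairs the sites one-to-one. A toy sketch of that FIFO contract; 13 sites is an assumption, suggested only by the animate_scales = [1.0] * 13 default.

feats = []

def writer_pass(sites):
    # ReferenceNet side: refs.append(h) before each attention module.
    for i in range(sites):
        feats.append(f"feature_from_site_{i}")

def reader_pass(sites):
    # ReferenceUNetModel side: refs.pop(0) at each SpatialTransformerPlus.
    for i in range(sites):
        assert feats.pop(0) == f"feature_from_site_{i}"  # sites pair one-to-one

writer_pass(13)
reader_pass(13)
assert not feats  # every reference feature consumed exactly once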
ddim_sampler = DDIMSampler(self)
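The gold next line only instantiates a DDIM sampler; the rest of sample_log is elided by the record. Purely as a hypothetical illustration of the call pattern that typically follows in LatentDiffusion-style codebases — StubSampler and all shapes below are invented, not the file's actual continuation:

import torch

class StubSampler:
    # Invented stand-in for a DDIM-style sampler; not the record's class.
    def sample(self, steps, batch_size, shape, cond, eta=0.0, verbose=False):
        samples = torch.randn(batch_size, *shape)   # final denoised latents
        intermediates = {"x_inter": [samples]}      # per-step snapshots
        return samples, intermediates

sampler = StubSampler()
samples, intermediates = sampler.sample(
    steps=50, batch_size=4, shape=(4, 32, 32), cond={"c_crossattn": [None]}
)
assert samples.shape == (4, 4, 32, 32)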
11
2023-12-16 03:31:33+00:00
24k
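One detail worth calling out from the record above: PoseGuider's output projection is wrapped in zero_module, so the guider contributes exactly nothing at initialization and fine-tuning starts from the pretrained prior, ControlNet-style. A sketch of that helper as it is usually written in ldm-derived codebases (reconstructed from memory, not copied from the record):

import torch
import torch.nn as nn

def zero_module(module):
    # Zero every parameter so the branch is a no-op at initialization.
    for p in module.parameters():
        nn.init.zeros_(p)
    return module

proj = zero_module(nn.Conv2d(128, 4, kernel_size=4, stride=1, padding=1))
assert proj(torch.randn(1, 128, 16, 16)).abs().sum() == 0  # contributes nothing yet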
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME...
from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
    MetadataCatalog,
    build_detection_train_loader,
    build_detection_test_loader,
)
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.modeling import build_model
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
    inference_on_dataset,
    print_csv_format,
    DatasetEvaluator,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from fvcore.nn.precise_bn import get_bn_modules
from clouds import (
    CityscapesSemSegEvaluator,
    ClassicalSemSegEvaluator,
    MapperTrain,
    MapperTest,
    add_maskformer2_config,
    add_clouds_config,
    add_wandb_config,
    add_prerocessing_training_set_config,
    PersoEvalHook,
    add_repeat_factors,
)
from clouds.utils import setup_wandb, WandbWriter
import warnings
import copy
import itertools
import logging
import os
import ast
import torch
import detectron2.utils.comm as comm

14,515
return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
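The cropped code's first line returns FullModelGradientClippingOptimizer, a class built by subclassing the chosen optimizer and clipping the global gradient norm across all parameter groups inside step(). The same pattern, extracted into a runnable standalone sketch (the helper name and hyperparameters below are invented):

import itertools
import torch

def with_full_model_clipping(optim_cls, clip_norm_val):
    # Subclass the optimizer and clip the *global* grad norm across all
    # parameter groups before every update, as in the snippet above.
    class ClippedOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return ClippedOptimizer

model = torch.nn.Linear(8, 2)
opt = with_full_model_clipping(torch.optim.AdamW, clip_norm_val=0.01)(
    model.parameters(), lr=1e-3
)
model(torch.randn(4, 8)).sum().backward()
opt.step()  # gradients were clipped to global norm 0.01 before the update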
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
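build_optimizer in the full file assigns hyperparameters per parameter rather than per model: backbone parameters get a scaled learning rate, while norm layers and embedding-like tables get their own weight decay. A condensed, runnable sketch of that walk; the function name, default values, and the "pos_embed" substring check are illustrative assumptions:

import copy
import torch

def build_param_groups(model, base_lr=1e-4, backbone_multiplier=0.1,
                       wd=0.05, wd_norm=0.0):
    defaults = {"lr": base_lr, "weight_decay": wd}
    norm_types = (torch.nn.LayerNorm, torch.nn.BatchNorm2d, torch.nn.GroupNorm)
    params, memo = [], set()
    for module_name, module in model.named_modules():
        for name, value in module.named_parameters(recurse=False):
            if not value.requires_grad or value in memo:
                continue  # skip frozen params and avoid duplicates
            memo.add(value)
            hp = copy.copy(defaults)
            if "backbone" in module_name:
                hp["lr"] *= backbone_multiplier     # lower LR for the backbone
            if isinstance(module, norm_types) or "pos_embed" in name:
                hp["weight_decay"] = wd_norm        # no decay for norms/pos tables
            params.append({"params": [value], **hp})
    return params

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.LayerNorm(4))
opt = torch.optim.AdamW(build_param_groups(model))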
ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
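The gold next line registers PersoEvalHook where stock detectron2 would register hooks.EvalHook; both take an eval period and a zero-argument callable whose return value becomes the latest results. A toy hook showing just that contract (ToyEvalHook is invented here; the real hooks carry far more machinery):

class ToyEvalHook:
    def __init__(self, period, fn):
        self.period, self.fn = period, fn
    def after_step(self, iteration):
        if self.period > 0 and (iteration + 1) % self.period == 0:
            return self.fn()

hook = ToyEvalHook(period=2, fn=lambda: {"mIoU": 0.5})
results = [hook.after_step(i) for i in range(4)]
assert results == [None, {"mIoU": 0.5}, None, {"mIoU": 0.5}]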
9
2023-12-15 15:40:58+00:00
24k
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(...
from typing import Dict, List, Optional
from .layer_norm import LayerNorm
from .multihead_attention import MultiheadAttention
from .relational_memory import RelationalMemory
from .group_linear_layer import GroupLinearLayer
from .basic_mha import MemoryAttention
from .quant_noise import quant_noise
from .fairseq_dropout import FairseqDropout
from torch import Tensor
import torch
import torch.nn as nn
import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils
import random
import torch.nn.functional as F
17,997
#print('len qlst', len(qlst)) #for kval in klst: # print(kval.shape) k = torch.cat(klst, dim=3) v = torch.cat(vlst, dim=3) #should return these q,k,v and save to a big list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim self.quant_noise = getattr(args, "quant_noise_pq", 0) self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) self.use_nfm = use_nfm print('using nfm?', self.use_nfm) self.nb = nb self.norm_blocks = self.nb self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. 
Then raise embed_dim in args self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) print("SETUP TRANSFORMER LAYER", 'blocks', self.nb) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1( self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size ) self.fc2 = self.build_fc2( args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size ) self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt: self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm) self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt_memory: memory_slots = 4 memory_head_size = 128 memory_num_heads = 1 gate_style = 'memory' print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size)
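The TransformerEncoderLayer docstring above spells out the two residual orderings: post-norm from the original paper (dropout -> add residual -> layernorm) versus the tensor2tensor pre-norm variant toggled by encoder_normalize_before. A toy sublayer making the difference concrete:

import torch
import torch.nn as nn

def sublayer(x, f, norm, normalize_before):
    residual = x
    if normalize_before:
        x = norm(x)       # pre-norm (tensor2tensor): layernorm -> f -> add
    x = f(x)
    x = residual + x
    if not normalize_before:
        x = norm(x)       # post-norm (paper default): f -> add -> layernorm
    return x

x = torch.randn(5, 2, 16)   # (seq_len, batch, embed_dim)
out = sublayer(x, nn.Linear(16, 16), nn.LayerNorm(16), normalize_before=True)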
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args): return MultiheadAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=args.self_attention, shared_memory_attention = args.shared_memory_attention, use_topk = args.use_topk, topk = args.topk, num_steps = args.num_steps, mem_slots = args.mem_slots, null_attention = args.null_attention, regressive = args.regressive ) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None, memory = None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where
                T_tgt is the query length and T_src is the key length (here both
                query and key are x). attn_mask[t_tgt, t_src] = 1 means that t_src
                is excluded (masked out) when computing the embedding for t_tgt;
                0 means it is included in attention.

        Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
        """
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        if attn_mask is not None:
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
            # anything in original attn_mask = 1, becomes -1e8
            # anything in original attn_mask = 0, becomes 0
            # Note that we cannot use -inf here, because at some edge cases,
            # the attention weight (before softmax) for some padded element in query
            # will become -inf, which results in NaN in model parameters
            # TODO: to formally solve this problem, we need to change fairseq's
            # MultiheadAttention. We will do this later on.
        #print(state is not None)
        x, memory, _ = self.self_attn(
            query=state if state is not None else x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
            attn_mask=attn_mask,
            memory=memory
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)

        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.final_layer_norm(x)

        if self.final_linear is not None:
            x = self.final_linear(x)

        return x, memory


class Attention(nn.Module):
    def __init__(self, n_heads, n_blocks, dim, use_nfm):
        super(Attention, self).__init__()

        self.use_nfm = use_nfm

        #self.n_heads = n_heads
        self.n_heads = 12
        self.n_blocks = n_blocks
        self.dim = dim
        self.block_dim = dim // self.n_blocks
        #self.head_dim = self.block_dim // self.n_heads
        self.head_dim = 64
        self.scale = self.head_dim ** -0.5

        self.query_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks)
        self.key_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks)
        self.value_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks)
        self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim, n_blocks)

    def forward(self, x, qkv=None):

        use_exshare = False

        if qkv is not None:
            klst, vlst = qkv

        seq_len, bsz, _ = x.shape

        if use_exshare:
            x = x.view(seq_len, bsz, self.n_blocks * self.block_dim)
            q = self.query_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim)
            k = self.key_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim)
            v = self.value_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim)
        else:
            x = x.view(seq_len, bsz, self.n_blocks * self.block_dim)
            q = self.query_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim)
            k = self.key_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim)
            v = self.value_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim)

        q = q.transpose(2,3) * self.scale
        k = k.transpose(2,3)
        v = v.transpose(2,3)

        if random.uniform(0,1) < 0.00001:
            print('use NFM?', self.use_nfm)

        if self.use_nfm:
            if qkv is not None:
                klst.append(k)
                vlst.append(v)
                #print('len qlst', len(qlst))
                #for kval in klst:
                #    print(kval.shape)
                k = torch.cat(klst, dim=3)
                v = torch.cat(vlst, dim=3)

        # should return these q, k, v and save them to a big list. Also pull in from
        # the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers.

        #print('running comm attention with shapes', q.shape, k.shape, v.shape)

        score = torch.matmul(q, k.transpose(3,4))
        #print('score shape', score.shape)
        score = F.softmax(score, dim=-1)
        out = torch.matmul(score, v).transpose(2,3)
        #print('out shape', out.shape)
        score = score.mean(dim=2)

        out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads)
        out = self.final(out)
        out = out.view(seq_len, bsz, self.dim)

        return out, score


class NormLayer(nn.Module):
    def __init__(self, num_rims, dim, export=False):
        super(NormLayer, self).__init__()
        self.num_rims = num_rims
        self.dim = dim

        self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,))
        self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,))

        self.norm = LayerNorm(dim, export=export, elementwise_affine=False)

    def forward(self, x):
        seq_len, bsz, _ = x.shape

        x = x.view(seq_len, bsz, self.num_rims, self.dim)
        x = self.norm(x)
        x = x.view(seq_len, bsz, self.num_rims * self.dim)

        weight_use = self.weight.repeat(seq_len, bsz, 1)
        bias_use = self.bias.repeat(seq_len, bsz, 1)

        x = x * weight_use + bias_use

        return x


class TransformerEncoderLayer(nn.Module):
    """Encoder layer block.

    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.

    Args:
        args (argparse.Namespace): parsed command-line arguments
    """

    def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None):
        super().__init__()

        self.blockatt = blockatt
        self.blockatt_memory = blockatt_memory

        self.embed_dim = args.encoder_embed_dim
        self.quant_noise = getattr(args, "quant_noise_pq", 0)
        self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)

        self.use_nfm = use_nfm

        print('using nfm?', self.use_nfm)

        self.nb = nb
        self.norm_blocks = self.nb

        self.self_attn = self.build_self_attention(self.embed_dim, args)  # should divide embed_dim by nb, then raise embed_dim in args
        self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)
        self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, "activation_fn", "relu")
        )

        print("SETUP TRANSFORMER LAYER", 'blocks', self.nb)

        activation_dropout_p = getattr(args, "activation_dropout", 0)
        if activation_dropout_p == 0:
            # for backwards compatibility with models that use args.relu_dropout
            activation_dropout_p = getattr(args, "relu_dropout", 0)
        self.activation_dropout_module = FairseqDropout(
            float(activation_dropout_p), module_name=self.__class__.__name__
        )
        self.normalize_before = args.encoder_normalize_before

        self.fc1 = self.build_fc1(
            self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size
        )
        self.fc2 = self.build_fc2(
            args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size
        )

        self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)

        if self.blockatt:
            self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm)
            self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)

        if self.blockatt_memory:
            memory_slots = 4
            memory_head_size = 128
            memory_num_heads = 1
            gate_style = 'memory'
            print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size)
self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim,
2
2023-12-15 13:13:01+00:00
24k
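
The Attention block in the record above projects queries, keys, and values with GroupLinearLayer, which the excerpt references but never defines. Below is a minimal sketch of the usual block-wise implementation; the shape contract ((..., n_blocks * din) in, (..., n_blocks * dout) out via one independent weight per block) is an assumption inferred from how query_net/key_net/value_net are called, not code from this record.

import torch
import torch.nn as nn

class GroupLinearLayer(nn.Module):
    """Applies an independent linear map to each of `num_blocks` feature groups.

    Input (..., num_blocks * din) -> output (..., num_blocks * dout), where
    block b is transformed only by weight[b]. Hypothetical sketch, not the
    record's actual definition.
    """
    def __init__(self, din: int, dout: int, num_blocks: int):
        super().__init__()
        self.din, self.dout, self.num_blocks = din, dout, num_blocks
        self.weight = nn.Parameter(0.01 * torch.randn(num_blocks, din, dout))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        lead_shape = x.shape[:-1]                     # e.g. (seq_len, bsz)
        x = x.reshape(-1, self.num_blocks, self.din)  # (N, blocks, din)
        x = x.permute(1, 0, 2)                        # (blocks, N, din)
        x = torch.bmm(x, self.weight)                 # (blocks, N, dout)
        x = x.permute(1, 0, 2).reshape(*lead_shape, self.num_blocks * self.dout)
        return x

With this contract, `self.query_net(x).view(seq_len, bsz, n_blocks, n_heads, head_dim)` in Attention.forward lines up, since each block's output width is head_dim * n_heads.
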
m-abr/FCPCodebase
world/Robot.py
[ { "identifier": "Math_Ops", "path": "math_ops/Math_Ops.py", "snippet": "class Math_Ops():\n '''\n This class provides general mathematical operations that are not directly available through numpy \n '''\n \n @staticmethod\n def deg_sph2cart(spherical_vec):\n ''' Converts SimSpark'...
from collections import deque from math import atan, pi, sqrt, tan from math_ops.Math_Ops import Math_Ops as M from math_ops.Matrix_3x3 import Matrix_3x3 from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Body_Part import Body_Part from world.commons.Joint_Info import Joint_Info import numpy as np import xml.etree.ElementTree as xmlp
14,671
self.body_parts = dict()    # keys='body part names' (given by the robot's XML), values='Body_Part objects'
        self.unum = unum            # Robot's uniform number
        self.gyro = np.zeros(3)     # Angular velocity along the three axes of freedom of the robot's torso (deg/s)
        self.acc = np.zeros(3)      # Proper acceleration along the three axes of freedom of the robot's torso (m/s2)
        self.frp = dict()           # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)}
        self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0}                   # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface
        self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message
        self.fwd_kinematics_list = None             # List of body parts, ordered according to dependencies
        self.rel_cart_CoM_position = np.zeros(3)    # Center of Mass position, relative to head, in cartesian coordinates (m)

        # Joint variables are optimized for performance / array operations
        self.joints_position = np.zeros(self.no_of_joints)          # Joints' angular position (deg)
        self.joints_speed = np.zeros(self.no_of_joints)             # Joints' angular speed (rad/s)
        self.joints_target_speed = np.zeros(self.no_of_joints)      # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_info = [None] * self.no_of_joints               # Joints' constant information (see class Joint_Info)
        self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix

        # Localization variables relative to head
        self.loc_head_to_field_transform = Matrix_4x4()   # Transformation matrix from head to field
        self.loc_field_to_head_transform = Matrix_4x4()   # Transformation matrix from field to head
        self.loc_rotation_head_to_field = Matrix_3x3()    # Rotation matrix from head to field
        self.loc_rotation_field_to_head = Matrix_3x3()    # Rotation matrix from field to head
        self.loc_head_position = np.zeros(3)              # Absolute head position (m)
        self.loc_head_position_history = deque(maxlen=40) # Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.loc_head_velocity = np.zeros(3)              # Absolute head velocity (m/s) (Warning: possibly noisy)
        self.loc_head_orientation = 0                     # Head orientation (deg)
        self.loc_is_up_to_date = False                    # False if this is not a visual step, or not enough elements are visible
        self.loc_last_update = 0                          # World.time_local_ms when the localization was last updated
        self.loc_head_position_last_update = 0            # World.time_local_ms when loc_head_position was last updated by vision or radio
        self.radio_fallen_state = False                   # True if (radio says we fell) and (radio is significantly more recent than loc)
        self.radio_last_update = 0                        # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)

        # Localization variables relative to torso
        self.loc_torso_to_field_rotation = Matrix_3x3()   # Rotation matrix from torso to field
        self.loc_torso_to_field_transform = Matrix_4x4()  # Transformation matrix from torso to field
        self.loc_torso_roll = 0                           # Torso roll (deg)
        self.loc_torso_pitch = 0                          # Torso pitch (deg)
        self.loc_torso_orientation = 0                    # Torso orientation (deg)
        self.loc_torso_inclination = 0                    # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)
        self.loc_torso_position = np.zeros(3)             # Absolute torso position (m)
        self.loc_torso_velocity = np.zeros(3)             # Absolute torso velocity (m/s)
        self.loc_torso_acceleration = np.zeros(3)         # Absolute Coordinate acceleration (m/s2)

        # Other localization variables
        self.cheat_abs_pos = np.zeros(3)    # Absolute head position provided by the server as cheat (m)
        self.cheat_ori = 0.0                # Absolute head orientation provided by the server as cheat (deg)
        self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)
        self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)

        # Localization special variables
        '''
        self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.
        There are situations in which the rotation and translation cannot be computed,
        but the z-coordinate can still be found through vision, in which case:
            self.loc_is_up_to_date is False
            self.loc_head_z_is_up_to_date is True
        It should be used in applications which rely on z as an independent coordinate, such
        as detecting if the robot has fallen, or as an observation for machine learning.
        It should NEVER be used for 3D transformations.
        '''
        self.loc_head_z = 0                   # Absolute head position (z) - see above for explanation (m)
        self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible
        self.loc_head_z_last_update = 0       # World.time_local_ms when loc_head_z was last computed
        self.loc_head_z_vel = 0               # Absolute head velocity (z) (m/s)

        # Localization + Gyroscope
        # These variables are reliable. The gyroscope is used to update the rotation when waiting for the next visual cycle
        self.imu_torso_roll = 0                         # Torso roll (deg) (src: Localization + Gyro)
        self.imu_torso_pitch = 0                        # Torso pitch (deg) (src: Localization + Gyro)
        self.imu_torso_orientation = 0                  # Torso orientation (deg) (src: Localization + Gyro)
        self.imu_torso_inclination = 0                  # Torso inclination (deg) (src: Localization + Gyro)
        self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)
        self.imu_last_visual_update = 0                 # World.time_local_ms when the IMU data was last updated with visual information

        # Localization + Gyroscope + Accelerometer
        # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to
        # wrong acceleration -> wrong velocity -> wrong position
        self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)
        self.imu_weak_head_to_field_transform = Matrix_4x4()  # Transformation matrix from head to field (src: Localization + Gyro + Acc)
        self.imu_weak_field_to_head_transform = Matrix_4x4()  # Transformation matrix from field to head (src: Localization + Gyro + Acc)
        self.imu_weak_torso_position = np.zeros(3)            # Absolute torso position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_velocity = np.zeros(3)            # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_acceleration = np.zeros(3)        # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_position = np.zeros(3)       # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_velocity = np.zeros(3)       # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_position = np.zeros(3)              # Absolute CoM position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_velocity = np.zeros(3)              # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)

        # Using explicit variables to enable IDE suggestions
        self.J_HEAD_YAW = 0
        self.J_HEAD_PITCH = 1
        self.J_LLEG_YAW_PITCH = 2
        self.J_RLEG_YAW_PITCH = 3
        self.J_LLEG_ROLL = 4
        self.J_RLEG_ROLL = 5
        self.J_LLEG_PITCH = 6
        self.J_RLEG_PITCH = 7
        self.J_LKNEE = 8
        self.J_RKNEE = 9
        self.J_LFOOT_PITCH = 10
        self.J_RFOOT_PITCH = 11
        self.J_LFOOT_ROLL = 12
        self.J_RFOOT_ROLL = 13
        self.J_LARM_PITCH = 14
        self.J_RARM_PITCH = 15
        self.J_LARM_ROLL = 16
        self.J_RARM_ROLL = 17
        self.J_LELBOW_YAW = 18
        self.J_RELBOW_YAW = 19
        self.J_LELBOW_ROLL = 20
        self.J_RELBOW_ROLL = 21
        self.J_LTOE_PITCH = 22
        self.J_RTOE_PITCH = 23

        #------------------ parse robot xml
class Robot():
    STEPTIME = 0.02   # Fixed step time
    VISUALSTEP = 0.04 # Fixed visual step time
    SQ_STEPTIME = STEPTIME * STEPTIME
    GRAVITY = np.array([0,0,-9.81])
    IMU_DECAY = 0.996 # IMU's velocity decay

    #------------------ constants to force symmetry in joints/effectors

    MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3,
                              "llj2":4, "rlj2":5, "llj3":6, "rlj3":7,
                              "llj4":8, "rlj4":9, "llj5":10,"rlj5":11,
                              "llj6":12,"rlj6":13,"laj1":14,"raj1":15,
                              "laj2":16,"raj2":17,"laj3":18,"raj3":19,
                              "laj4":20,"raj4":21,"llj7":22,"rlj7":23 }

    # Fix symmetry issues 1a/4 (identification)
    FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'}
    FIX_INDICES_LIST = [5,13,17,18,20]

    # Recommended height for unofficial beam (near ground)
    BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4]

    def __init__(self, unum:int, robot_type:int) -> None:
        robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name
        self.type = robot_type
        self.beam_height = Robot.BEAM_HEIGHTS[robot_type]
        self.no_of_joints = 24 if robot_type == 4 else 22

        # Fix symmetry issues 1b/4 (identification)
        self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints)
        self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1

        self.body_parts = dict()    # keys='body part names' (given by the robot's XML), values='Body_Part objects'
        self.unum = unum            # Robot's uniform number
        self.gyro = np.zeros(3)     # Angular velocity along the three axes of freedom of the robot's torso (deg/s)
        self.acc = np.zeros(3)      # Proper acceleration along the three axes of freedom of the robot's torso (m/s2)
        self.frp = dict()           # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)}
        self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0}                   # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface
        self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message
        self.fwd_kinematics_list = None             # List of body parts, ordered according to dependencies
        self.rel_cart_CoM_position = np.zeros(3)    # Center of Mass position, relative to head, in cartesian coordinates (m)

        # Joint variables are optimized for performance / array operations
        self.joints_position = np.zeros(self.no_of_joints)          # Joints' angular position (deg)
        self.joints_speed = np.zeros(self.no_of_joints)             # Joints' angular speed (rad/s)
        self.joints_target_speed = np.zeros(self.no_of_joints)      # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_info = [None] * self.no_of_joints               # Joints' constant information (see class Joint_Info)
        self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix

        # Localization variables relative to head
        self.loc_head_to_field_transform = Matrix_4x4()   # Transformation matrix from head to field
        self.loc_field_to_head_transform = Matrix_4x4()   # Transformation matrix from field to head
        self.loc_rotation_head_to_field = Matrix_3x3()    # Rotation matrix from head to field
        self.loc_rotation_field_to_head = Matrix_3x3()    # Rotation matrix from field to head
        self.loc_head_position = np.zeros(3)              # Absolute head position (m)
        self.loc_head_position_history = deque(maxlen=40) # Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.loc_head_velocity = np.zeros(3)              # Absolute head velocity (m/s) (Warning: possibly noisy)
        self.loc_head_orientation = 0                     # Head orientation (deg)
        self.loc_is_up_to_date = False                    # False if this is not a visual step, or not enough elements are visible
        self.loc_last_update = 0                          # World.time_local_ms when the localization was last updated
        self.loc_head_position_last_update = 0            # World.time_local_ms when loc_head_position was last updated by vision or radio
        self.radio_fallen_state = False                   # True if (radio says we fell) and (radio is significantly more recent than loc)
        self.radio_last_update = 0                        # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)

        # Localization variables relative to torso
        self.loc_torso_to_field_rotation = Matrix_3x3()   # Rotation matrix from torso to field
        self.loc_torso_to_field_transform = Matrix_4x4()  # Transformation matrix from torso to field
        self.loc_torso_roll = 0                           # Torso roll (deg)
        self.loc_torso_pitch = 0                          # Torso pitch (deg)
        self.loc_torso_orientation = 0                    # Torso orientation (deg)
        self.loc_torso_inclination = 0                    # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)
        self.loc_torso_position = np.zeros(3)             # Absolute torso position (m)
        self.loc_torso_velocity = np.zeros(3)             # Absolute torso velocity (m/s)
        self.loc_torso_acceleration = np.zeros(3)         # Absolute Coordinate acceleration (m/s2)

        # Other localization variables
        self.cheat_abs_pos = np.zeros(3)    # Absolute head position provided by the server as cheat (m)
        self.cheat_ori = 0.0                # Absolute head orientation provided by the server as cheat (deg)
        self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)
        self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)

        # Localization special variables
        '''
        self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.
        There are situations in which the rotation and translation cannot be computed,
        but the z-coordinate can still be found through vision, in which case:
            self.loc_is_up_to_date is False
            self.loc_head_z_is_up_to_date is True
        It should be used in applications which rely on z as an independent coordinate, such
        as detecting if the robot has fallen, or as an observation for machine learning.
        It should NEVER be used for 3D transformations.
        '''
        self.loc_head_z = 0                   # Absolute head position (z) - see above for explanation (m)
        self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible
        self.loc_head_z_last_update = 0       # World.time_local_ms when loc_head_z was last computed
        self.loc_head_z_vel = 0               # Absolute head velocity (z) (m/s)

        # Localization + Gyroscope
        # These variables are reliable. The gyroscope is used to update the rotation when waiting for the next visual cycle
        self.imu_torso_roll = 0                         # Torso roll (deg) (src: Localization + Gyro)
        self.imu_torso_pitch = 0                        # Torso pitch (deg) (src: Localization + Gyro)
        self.imu_torso_orientation = 0                  # Torso orientation (deg) (src: Localization + Gyro)
        self.imu_torso_inclination = 0                  # Torso inclination (deg) (src: Localization + Gyro)
        self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)
        self.imu_last_visual_update = 0                 # World.time_local_ms when the IMU data was last updated with visual information

        # Localization + Gyroscope + Accelerometer
        # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to
        # wrong acceleration -> wrong velocity -> wrong position
        self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)
        self.imu_weak_head_to_field_transform = Matrix_4x4()  # Transformation matrix from head to field (src: Localization + Gyro + Acc)
        self.imu_weak_field_to_head_transform = Matrix_4x4()  # Transformation matrix from field to head (src: Localization + Gyro + Acc)
        self.imu_weak_torso_position = np.zeros(3)            # Absolute torso position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_velocity = np.zeros(3)            # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_acceleration = np.zeros(3)        # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_position = np.zeros(3)       # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_velocity = np.zeros(3)       # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_position = np.zeros(3)              # Absolute CoM position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_velocity = np.zeros(3)              # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)

        # Using explicit variables to enable IDE suggestions
        self.J_HEAD_YAW = 0
        self.J_HEAD_PITCH = 1
        self.J_LLEG_YAW_PITCH = 2
        self.J_RLEG_YAW_PITCH = 3
        self.J_LLEG_ROLL = 4
        self.J_RLEG_ROLL = 5
        self.J_LLEG_PITCH = 6
        self.J_RLEG_PITCH = 7
        self.J_LKNEE = 8
        self.J_RKNEE = 9
        self.J_LFOOT_PITCH = 10
        self.J_RFOOT_PITCH = 11
        self.J_LFOOT_ROLL = 12
        self.J_RFOOT_ROLL = 13
        self.J_LARM_PITCH = 14
        self.J_RARM_PITCH = 15
        self.J_LARM_ROLL = 16
        self.J_RARM_ROLL = 17
        self.J_LELBOW_YAW = 18
        self.J_RELBOW_YAW = 19
        self.J_LELBOW_ROLL = 20
        self.J_RELBOW_ROLL = 21
        self.J_LTOE_PITCH = 22
        self.J_RTOE_PITCH = 23

        #------------------ parse robot xml
dir = M.get_active_directory("/world/commons/robots/")
1
2023-12-16 23:40:23+00:00
24k
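
The imu_weak_* fields in this record exist to support dead reckoning between visual updates, but the update routine itself is not part of the excerpt. The following is only an illustrative sketch of that kind of propagation step, built from the class constants defined above (STEPTIME, SQ_STEPTIME, GRAVITY, IMU_DECAY); the function name and the exact placement of the decay factor are assumptions, not the codebase's actual implementation.

import numpy as np

STEPTIME = 0.02                    # s, from Robot.STEPTIME
SQ_STEPTIME = STEPTIME * STEPTIME
GRAVITY = np.array([0, 0, -9.81])  # m/s2, from Robot.GRAVITY
IMU_DECAY = 0.996                  # from Robot.IMU_DECAY

def propagate_imu_weak(position, velocity, acc_torso_frame, torso_to_field):
    """One dead-reckoning step (illustrative sketch only).

    position, velocity: absolute torso state (m, m/s)
    acc_torso_frame: proper acceleration from the accelerometer (m/s2)
    torso_to_field: 3x3 rotation matrix (field <- torso)
    """
    # Proper acceleration excludes gravity, so add it back after rotating to the field frame.
    acceleration = torso_to_field @ acc_torso_frame + GRAVITY
    # Constant-acceleration kinematics over one 20 ms server step.
    next_position = position + velocity * STEPTIME + 0.5 * acceleration * SQ_STEPTIME
    # Decay the velocity so small orientation errors do not accumulate without bound.
    next_velocity = (velocity + acceleration * STEPTIME) * IMU_DECAY
    return next_position, next_velocity
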
Sam-Izdat/tinycio
src/tinycio/lut.py
[ { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python...
import typing import os import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from typing import Union from enum import IntEnum from contextlib import nullcontext from .colorspace import ColorSpace from .fsio.lutfile import load_lut, save_lut, _infer_lut_file_format, _generate_linear_cube_lut from .fsio.format import LUTFormat from .util.colorutil import srgb_luminance from .util.miscutil import trilinear_interpolation from .loss import feature_moments_calculation
19,584
""" Returns linear LUT. Has no effect: when applied, output matches input ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ if lut_format == LUTFormat.CUBE_3D: assert cls.__min_size <= size <= cls.__max_size, f"LUT size must be between {cls.__min_size} and {cls.__max_size}" variant = LUTFormat.CUBE_3D lattice = _generate_linear_cube_lut(size) else: raise Exception(f"Backpropagation not implemented for: {lut_format.name}") return cls(size, lattice, variant) @classmethod def get_negative(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns negative LUT. Output is inverted ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = 1. - lut.lattice return lut @classmethod def get_random(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns random LUT. Everything mapped to random values ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = torch.randn_like(lut.lattice) return lut @classmethod def get_empty(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns empty LUT. All values mapped to 0. :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = lut.lattice * 0. return lut def fit_to_image(self, im_source:Union[torch.Tensor, ColorImage], im_target:Union[torch.Tensor, ColorImage], steps:int=500, learning_rate:float=0.003, strength:float=1., fit_height:int=512, fit_width:int=512, device:str='cuda', context:callable=None ) -> bool: """ Perform gradient descent on the lattice, so that the appearance of the source image matches the target. :param im_source: Source image tensor. Values must be in range [0, 1]. :type im_source: torch.Tensor | ColorImage :param im_target: Target image tensor. :type im_target: torch.Tensor | ColorImage :param steps: Number of optimization steps. :param learning_rate: Learning rate for gradient descent. :param strength: Strength of the effect in range [0, 1]. :param fit_height: Image tensors will be interpolated to this height for evaluation. :param fit_width: Image tensors will be interpolated to this width for evaluation. :param device: Device for gradient descent (if None will use input tensor device). :return: True when completed """ assert 0. <= strength <= 1., "strength must be in range [0, 1]" im_source = im_source.clone() device = torch.device(device.strip().lower()) if device is not None else im_source.device im_source = F.interpolate( im_source.unsqueeze(0), size=[fit_height, fit_width], mode='bicubic', align_corners=False).squeeze(0).clamp(0.,1.).to(device) im_target = F.interpolate( im_target.unsqueeze(0), size=[fit_height, fit_width], mode='bicubic', align_corners=False).squeeze(0).clamp(0.,1.).to(device) __ctx = context if context is not None and callable(context) else nullcontext with __ctx() as ctx: cb_callable = hasattr(ctx, 'update_fit_status') and callable(ctx.update_fit_status) cb = ctx.update_fit_status if cb_callable else lambda a, b, c, d: None if self.lut_format == LUTFormat.CUBE_3D: lut = torch.nn.Parameter(self.lattice) lut.requires_grad_() optimizer = optim.Adam([lut], lr=learning_rate) indices = (im_source * (lut.size(0) - 1)).clamp(0, lut.size(0) - 1).to(device) area = fit_height * fit_height fm_mean_scale = area fm_p2_scale = area / 32. fm_p3_scale = area / 64. 
selfsim_scale = area sat_scale = area # lut optimization goes a bit wild with this for step in range(steps): t_source = trilinear_interpolation(lut.to(device), indices).to(device) loss = 0. # Main feature loss feat_source_mean, feat_source_p2, feat_source_p3 = feature_moments_calculation(t_source.view(1,3,-1)) feat_target_mean, feat_target_p2, feat_target_p3 = feature_moments_calculation(im_target.view(1,3,-1)) loss += F.mse_loss(feat_source_mean, feat_target_mean) * fm_mean_scale * strength loss += F.mse_loss(feat_source_p2, feat_target_p2) * fm_p2_scale * strength loss += F.mse_loss(feat_source_p3, feat_target_p3) * fm_p3_scale * strength # Additional saturation-focused loss
from __future__ import annotations

class LookupTable:
    """
    Color lookup table. Example:

    .. highlight:: python
    .. code-block:: python

        lut = LookupTable.get_negative()
        im_negative = lut.apply(im)

    :param size: Size of the LUT.
    :param lattice: Lattice as tensor (defaults to linear).
    :param lut_format: Format of the LUT.
    """
    size = 32
    lattice = None
    lut_format = LUTFormat.UNKNOWN

    __min_size, __max_size = 4, 512

    def __init__(self, size:int, lattice:torch.Tensor=None, lut_format:LUTFormat=LUTFormat.CUBE_3D):
        assert self.__min_size <= size <= self.__max_size, f"LUT size must be between {self.__min_size} and {self.__max_size}"
        self.size = size
        self.lattice = lattice if lattice is not None else _generate_linear_cube_lut(size)
        self.lut_format = lut_format

    @classmethod
    def load(cls, fp:str, lut_format:LUTFormat=LUTFormat.UNKNOWN) -> LookupTable:
        """
        Load LUT from file.

        :param fp: File path.
        :param lut_format: Format of the LUT.
        """
        fp = os.path.realpath(fp)
        fn, fnext = os.path.splitext(fp)
        variant = lut_format if lut_format > LUTFormat.UNKNOWN else _infer_lut_file_format(fnext)
        assert variant > LUTFormat.UNKNOWN, "Unrecognized LUT format"
        lattice = load_lut(fp, variant)
        return cls(lattice.size(0), lattice, variant)

    def save(self, fp:str, lut_format:LUTFormat=LUTFormat.UNKNOWN):
        """
        Save LUT to file.

        .. warning::

            This will overwrite existing files.

        :param fp: File path.
        :param lut_format: Format of the LUT.
        """
        fp = os.path.realpath(fp)
        fn, fnext = os.path.splitext(fp)
        variant = lut_format if lut_format > LUTFormat.UNKNOWN else _infer_lut_file_format(fnext) or self.lut_format
        assert variant > LUTFormat.UNKNOWN, "Unrecognized LUT format"
        lattice = save_lut(self.lattice, fp, variant)
        return True

    def apply(self, im:Union[torch.Tensor, ColorImage]) -> torch.Tensor:
        """
        Apply LUT to image tensor.

        :param im: Input image tensor
        :type im: torch.Tensor | ColorImage
        :return: Image tensor with LUT applied
        """
        assert self.lut_format > LUTFormat.UNKNOWN and self.lattice is not None, "No LUT has been loaded"
        assert im.size(0) == 3, "Image should have three color channels (RGB)"
        assert self.lattice.size(-1) == 3, "Cube LUT should have three color channels"
        indices = (im * (self.lattice.size(0) - 1)).clamp(0, self.lattice.size(0) - 1)
        im_out = trilinear_interpolation(self.lattice, indices)
        return im_out

    @classmethod
    def get_linear(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable:
        """
        Returns linear LUT. Has no effect: when applied, output matches input ([0, 1] range).

        :param size: Size of the LUT.
        :param lut_format: Format of the LUT.
        """
        if lut_format == LUTFormat.CUBE_3D:
            assert cls.__min_size <= size <= cls.__max_size, f"LUT size must be between {cls.__min_size} and {cls.__max_size}"
            variant = LUTFormat.CUBE_3D
            lattice = _generate_linear_cube_lut(size)
        else:
            raise Exception(f"Backpropagation not implemented for: {lut_format.name}")
        return cls(size, lattice, variant)

    @classmethod
    def get_negative(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable:
        """
        Returns negative LUT. Output is inverted ([0, 1] range).

        :param size: Size of the LUT.
        :param lut_format: Format of the LUT.
        """
        lut = cls.get_linear(size, lut_format)
        lut.lattice = 1. - lut.lattice
        return lut

    @classmethod
    def get_random(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable:
        """
        Returns random LUT. Everything mapped to random values ([0, 1] range).

        :param size: Size of the LUT.
        :param lut_format: Format of the LUT.
        """
        lut = cls.get_linear(size, lut_format)
        lut.lattice = torch.randn_like(lut.lattice)
        return lut

    @classmethod
    def get_empty(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable:
        """
        Returns empty LUT. All values mapped to 0.

        :param size: Size of the LUT.
        :param lut_format: Format of the LUT.
        """
        lut = cls.get_linear(size, lut_format)
        lut.lattice = lut.lattice * 0.
        return lut

    def fit_to_image(self,
        im_source:Union[torch.Tensor, ColorImage],
        im_target:Union[torch.Tensor, ColorImage],
        steps:int=500,
        learning_rate:float=0.003,
        strength:float=1.,
        fit_height:int=512,
        fit_width:int=512,
        device:str='cuda',
        context:callable=None
        ) -> bool:
        """
        Perform gradient descent on the lattice, so that the appearance of the source image matches the target.

        :param im_source: Source image tensor. Values must be in range [0, 1].
        :type im_source: torch.Tensor | ColorImage
        :param im_target: Target image tensor.
        :type im_target: torch.Tensor | ColorImage
        :param steps: Number of optimization steps.
        :param learning_rate: Learning rate for gradient descent.
        :param strength: Strength of the effect in range [0, 1].
        :param fit_height: Image tensors will be interpolated to this height for evaluation.
        :param fit_width: Image tensors will be interpolated to this width for evaluation.
        :param device: Device for gradient descent (if None will use input tensor device).
        :return: True when completed
        """
        assert 0. <= strength <= 1., "strength must be in range [0, 1]"
        im_source = im_source.clone()
        device = torch.device(device.strip().lower()) if device is not None else im_source.device
        im_source = F.interpolate(
            im_source.unsqueeze(0),
            size=[fit_height, fit_width],
            mode='bicubic',
            align_corners=False).squeeze(0).clamp(0.,1.).to(device)
        im_target = F.interpolate(
            im_target.unsqueeze(0),
            size=[fit_height, fit_width],
            mode='bicubic',
            align_corners=False).squeeze(0).clamp(0.,1.).to(device)

        __ctx = context if context is not None and callable(context) else nullcontext
        with __ctx() as ctx:
            cb_callable = hasattr(ctx, 'update_fit_status') and callable(ctx.update_fit_status)
            cb = ctx.update_fit_status if cb_callable else lambda a, b, c, d: None
            if self.lut_format == LUTFormat.CUBE_3D:
                lut = torch.nn.Parameter(self.lattice)
                lut.requires_grad_()
                optimizer = optim.Adam([lut], lr=learning_rate)
                indices = (im_source * (lut.size(0) - 1)).clamp(0, lut.size(0) - 1).to(device)

                area = fit_height * fit_height
                fm_mean_scale = area
                fm_p2_scale = area / 32.
                fm_p3_scale = area / 64.
                selfsim_scale = area
                sat_scale = area # lut optimization goes a bit wild with this

                for step in range(steps):
                    t_source = trilinear_interpolation(lut.to(device), indices).to(device)
                    loss = 0.

                    # Main feature loss
                    feat_source_mean, feat_source_p2, feat_source_p3 = feature_moments_calculation(t_source.view(1,3,-1))
                    feat_target_mean, feat_target_p2, feat_target_p3 = feature_moments_calculation(im_target.view(1,3,-1))
                    loss += F.mse_loss(feat_source_mean, feat_target_mean) * fm_mean_scale * strength
                    loss += F.mse_loss(feat_source_p2, feat_target_p2) * fm_p2_scale * strength
                    loss += F.mse_loss(feat_source_p3, feat_target_p3) * fm_p3_scale * strength

                    # Additional saturation-focused loss
sat_s = srgb_luminance(t_source).repeat(3,1,1) - t_source
6
2023-12-15 15:39:08+00:00
24k
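
Both `apply()` and `fit_to_image()` in this record defer to `trilinear_interpolation`, imported from `util.miscutil` but not shown here. Below is a sketch of what a cube-LUT trilinear lookup typically looks like, assuming an `(S, S, S, 3)` lattice and `(3, H, W)` continuous indices in `[0, S-1]` as produced by `apply()`; the real helper's name and signature may differ.

import torch

def trilinear_interpolation_sketch(lattice: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
    """lattice: (S, S, S, 3) cube LUT; indices: (3, H, W) coords in [0, S-1]."""
    lo = indices.floor().long().clamp(0, lattice.size(0) - 2)  # lower lattice corner per channel
    frac = (indices - lo.float()).permute(1, 2, 0)             # (H, W, 3) fractional offsets
    x0, y0, z0 = lo[0], lo[1], lo[2]
    x1, y1, z1 = x0 + 1, y0 + 1, z0 + 1
    fx, fy, fz = frac[..., 0:1], frac[..., 1:2], frac[..., 2:3]

    # Blend the 8 surrounding lattice corners with trilinear weights.
    out = (lattice[x0, y0, z0] * (1 - fx) * (1 - fy) * (1 - fz)
         + lattice[x1, y0, z0] * fx       * (1 - fy) * (1 - fz)
         + lattice[x0, y1, z0] * (1 - fx) * fy       * (1 - fz)
         + lattice[x0, y0, z1] * (1 - fx) * (1 - fy) * fz
         + lattice[x1, y1, z0] * fx       * fy       * (1 - fz)
         + lattice[x1, y0, z1] * fx       * (1 - fy) * fz
         + lattice[x0, y1, z1] * (1 - fx) * fy       * fz
         + lattice[x1, y1, z1] * fx       * fy       * fz)
    return out.permute(2, 0, 1)  # back to (3, H, W)
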
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-in...
import datetime import logging import os import re import socket import sys import typing import warnings import ssl from http.client import HTTPConnection as _HTTPConnection from http.client import HTTPException as HTTPException # noqa: F401 from http.client import ResponseNotReady from socket import timeout as SocketTimeout from typing import Literal from .response import HTTPResponse from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT from .util.ssltransport import SSLTransport from ._collections import HTTPHeaderDict from .util.response import assert_header_parsing from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout from .util.util import to_str from .util.wait import wait_for_read from ._base_connection import _TYPE_BODY from ._base_connection import ProxyConfig as ProxyConfig from ._base_connection import _ResponseOptions as _ResponseOptions from ._version import __version__ from .exceptions import ( ConnectTimeoutError, HeaderParsingError, NameResolutionError, NewConnectionError, ProxyError, SystemTimeWarning, ) from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_ from .util.request import body_to_chunks from .util.ssl_ import assert_fingerprint as _assert_fingerprint from .util.ssl_ import ( create_urllib3_context, is_ipaddress, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) from .util.ssl_match_hostname import CertificateError, match_hostname from .util.url import Url from .response import HTTPResponse
15,793
) -> _WrappedAndVerifiedSocket: """Logic for constructing an SSLContext from all TLS parameters, passing that down into ssl_wrap_socket, and then doing certificate verification either via hostname or fingerprint. This function exists to guarantee that both proxies and targets have the same behavior when connecting via TLS. """ default_ssl_context = False if ssl_context is None: default_ssl_context = True context = create_urllib3_context( ssl_version=resolve_ssl_version(ssl_version), ssl_minimum_version=ssl_minimum_version, ssl_maximum_version=ssl_maximum_version, cert_reqs=resolve_cert_reqs(cert_reqs), ) else: context = ssl_context context.verify_mode = resolve_cert_reqs(cert_reqs) # In some cases, we want to verify hostnames ourselves if ( # `ssl` can't verify fingerprints or alternate hostnames assert_fingerprint or assert_hostname # assert_hostname can be set to False to disable hostname checking or assert_hostname is False # We still support OpenSSL 1.0.2, which prevents us from verifying # hostnames easily: https://github.com/pyca/pyopenssl/pull/933 or ssl_.IS_PYOPENSSL or not ssl_.HAS_NEVER_CHECK_COMMON_NAME ): context.check_hostname = False # Try to load OS default certs if none are given. We need to do the hasattr() check # for custom pyOpenSSL SSLContext objects because they don't support # load_default_certs(). if ( not ca_certs and not ca_cert_dir and not ca_cert_data and default_ssl_context and hasattr(context, "load_default_certs") ): context.load_default_certs() # Ensure that IPv6 addresses are in the proper format and don't have a # scope ID. Python's SSL module fails to recognize scoped IPv6 addresses # and interprets them as DNS hostnames. if server_hostname is not None: normalized = server_hostname.strip("[]") if "%" in normalized: normalized = normalized[: normalized.rfind("%")] if is_ipaddress(normalized): server_hostname = normalized ssl_sock = ssl_wrap_socket( sock=sock, keyfile=key_file, certfile=cert_file, key_password=key_password, ca_certs=ca_certs, ca_cert_dir=ca_cert_dir, ca_cert_data=ca_cert_data, server_hostname=server_hostname, ssl_context=context, tls_in_tls=tls_in_tls, ) try: if assert_fingerprint: _assert_fingerprint( ssl_sock.getpeercert(binary_form=True), assert_fingerprint ) elif ( context.verify_mode != ssl.CERT_NONE and not context.check_hostname and assert_hostname is not False ): cert: _TYPE_PEER_CERT_RET_DICT = ssl_sock.getpeercert() # type: ignore[assignment] # Need to signal to our match_hostname whether to use 'commonName' or not. # If we're using our own constructed SSLContext we explicitly set 'False' # because PyPy hard-codes 'True' from SSLContext.hostname_checks_common_name. if default_ssl_context: hostname_checks_common_name = False else: hostname_checks_common_name = ( getattr(context, "hostname_checks_common_name", False) or False ) _match_hostname( cert, assert_hostname or server_hostname, # type: ignore[arg-type] hostname_checks_common_name, ) return _WrappedAndVerifiedSocket( socket=ssl_sock, is_verified=context.verify_mode == ssl.CERT_REQUIRED or bool(assert_fingerprint), ) except BaseException: ssl_sock.close() raise def _match_hostname( cert: _TYPE_PEER_CERT_RET_DICT | None, asserted_hostname: str, hostname_checks_common_name: bool = False, ) -> None: # Our upstream implementation of ssl.match_hostname() # only applies this normalization to IP addresses so it doesn't # match DNS SANs so we do the same thing! 
stripped_hostname = asserted_hostname.strip("[]") if is_ipaddress(stripped_hostname): asserted_hostname = stripped_hostname try:
from __future__ import annotations if typing.TYPE_CHECKING: try: # Compiled with SSL? BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None # type: ignore[assignment] class BaseSSLError(BaseException): # type: ignore[no-redef] pass # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError BrokenPipeError = BrokenPipeError log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") _HAS_SYS_AUDIT = hasattr(sys, "audit") class HTTPConnection(_HTTPConnection): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) ] #: Whether this connection verifies the host's certificate. is_verified: bool = False #: Whether this proxy connection verified the proxy host's certificate. # If no proxy is currently connected to the value will be ``None``. proxy_is_verified: bool | None = None blocksize: int source_address: tuple[str, int] | None socket_options: connection._TYPE_SOCKET_OPTIONS | None _has_connected_to_proxy: bool _response_options: _ResponseOptions | None _tunnel_host: str | None _tunnel_port: int | None _tunnel_scheme: str | None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, ) -> None: super().__init__( host=host, port=port, timeout=Timeout.resolve_default_timeout(timeout), source_address=source_address, blocksize=blocksize, ) self.socket_options = socket_options self.proxy = proxy self.proxy_config = proxy_config self._has_connected_to_proxy = False self._response_options = None self._tunnel_host: str | None = None self._tunnel_port: int | None = None self._tunnel_scheme: str | None = None # https://github.com/python/mypy/issues/4125 # Mypy treats this as LSP violation, which is considered a bug. # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one. 
# However, there is also a `host` setter so LSP is not violated. # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed. @property def host(self) -> str: """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip(".") @host.setter def host(self, value: str) -> None: """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value def _new_conn(self) -> socket.socket: """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ try: sock = connection.create_connection( (self._dns_host, self.port), self.timeout, source_address=self.source_address, socket_options=self.socket_options, ) except socket.gaierror as e: raise NameResolutionError(self.host, self, e) from e except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except OSError as e: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e # Audit hooks are only available in Python 3.8+ if _HAS_SYS_AUDIT: sys.audit("http.client.connect", self, self.host, self.port) return sock def set_tunnel( self, host: str, port: int | None = None, headers: typing.Mapping[str, str] | None = None, scheme: str = "http", ) -> None: if scheme not in ("http", "https"): raise ValueError( f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'" ) super().set_tunnel(host, port=port, headers=headers) self._tunnel_scheme = scheme def connect(self) -> None: self.sock = self._new_conn() if self._tunnel_host: # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # type: ignore[attr-defined] # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) @property def is_closed(self) -> bool: return self.sock is None @property def is_connected(self) -> bool: if self.sock is None: return False return not wait_for_read(self.sock, timeout=0.0) @property def has_connected_to_proxy(self) -> bool: return self._has_connected_to_proxy def close(self) -> None: try: super().close() finally: # Reset all stateful properties so connection # can be re-used without leaking prior configs. 
self.sock = None self.is_verified = False self.proxy_is_verified = None self._has_connected_to_proxy = False self._response_options = None self._tunnel_host = None self._tunnel_port = None self._tunnel_scheme = None def putrequest( self, method: str, url: str, skip_host: bool = False, skip_accept_encoding: bool = False, ) -> None: """""" # Empty docstring because the indentation of CPython's implementation # is broken but we don't want this method in our documentation. match = _CONTAINS_CONTROL_CHAR_RE.search(method) if match: raise ValueError( f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})" ) return super().putrequest( method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding ) def putheader(self, header: str, *values: str) -> None: """""" if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): super().putheader(header, *values) elif to_str(header.lower()) not in SKIPPABLE_HEADERS: skippable_headers = "', '".join( [str.title(header) for header in sorted(SKIPPABLE_HEADERS)] ) raise ValueError( f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'" ) # `request` method's signature intentionally violates LSP. # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental. def request( # type: ignore[override] self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, *, chunked: bool = False, preload_content: bool = True, decode_content: bool = True, enforce_content_length: bool = True, ) -> None: # Update the inner socket's timeout value to send the request. # This only triggers if the connection is re-used. if self.sock is not None: self.sock.settimeout(self.timeout) # Store these values to be fed into the HTTPResponse # object later. TODO: Remove this in favor of a real # HTTP lifecycle mechanism. # We have to store these before we call .request() # because sometimes we can still salvage a response # off the wire even if we aren't able to completely # send the request body. self._response_options = _ResponseOptions( request_method=method, request_url=url, preload_content=preload_content, decode_content=decode_content, enforce_content_length=enforce_content_length, ) if headers is None: headers = {} header_keys = frozenset(to_str(k.lower()) for k in headers) skip_accept_encoding = "accept-encoding" in header_keys skip_host = "host" in header_keys self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) # Transform the body into an iterable of sendall()-able chunks # and detect if an explicit Content-Length is doable. chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize) chunks = chunks_and_cl.chunks content_length = chunks_and_cl.content_length # When chunked is explicit set to 'True' we respect that. if chunked: if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") else: # Detect whether a framing mechanism is already in use. If so # we respect that value, otherwise we pick chunked vs content-length # depending on the type of 'body'. if "content-length" in header_keys: chunked = False elif "transfer-encoding" in header_keys: chunked = True # Otherwise we go off the recommendation of 'body_to_chunks()'. 
else: chunked = False if content_length is None: if chunks is not None: chunked = True self.putheader("Transfer-Encoding", "chunked") else: self.putheader("Content-Length", str(content_length)) # Now that framing headers are out of the way we send all the other headers. if "user-agent" not in header_keys: self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) self.endheaders() # If we're given a body we start sending that in chunks. if chunks is not None: for chunk in chunks: # Sending empty chunks isn't allowed for TE: chunked # as it indicates the end of the body. if not chunk: continue if isinstance(chunk, str): chunk = chunk.encode("utf-8") if chunked: self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk)) else: self.send(chunk) # Regardless of whether we have a body or not, if we're in # chunked mode we want to send an explicit empty chunk. if chunked: self.send(b"0\r\n\r\n") def request_chunked( self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, ) -> None: """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ warnings.warn( "HTTPConnection.request_chunked() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).", category=DeprecationWarning, stacklevel=2, ) self.request(method, url, body=body, headers=headers, chunked=True) def getresponse( # type: ignore[override] self, ) -> HTTPResponse: """ Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # Raise the same error as http.client.HTTPConnection if self._response_options is None: raise ResponseNotReady() # Reset this attribute for being used again. resp_options = self._response_options self._response_options = None # Since the connection's timeout value may have been updated # we need to set the timeout on the socket. self.sock.settimeout(self.timeout) # This is needed here to avoid circular import errors # Get the response from http.client.HTTPConnection httplib_response = super().getresponse() try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: log.warning( "Failed to parse headers (url=%s): %s", _url_from_connection(self, resp_options.request_url), hpe, exc_info=True, ) headers = HTTPHeaderDict(httplib_response.msg.items()) response = HTTPResponse( body=httplib_response, headers=headers, status=httplib_response.status, version=httplib_response.version, reason=httplib_response.reason, preload_content=resp_options.preload_content, decode_content=resp_options.decode_content, original_response=httplib_response, enforce_content_length=resp_options.enforce_content_length, request_method=resp_options.request_method, request_url=resp_options.request_url, ) return response class HTTPSConnection(HTTPConnection): """ Many of the parameters to this constructor are passed to the underlying SSL socket by means of :py:func:`urllib3.util.ssl_wrap_socket`. 
""" default_port = port_by_scheme["https"] # type: ignore[misc] cert_reqs: int | str | None = None ca_certs: str | None = None ca_cert_dir: str | None = None ca_cert_data: None | str | bytes = None ssl_version: int | str | None = None ssl_minimum_version: int | None = None ssl_maximum_version: int | None = None assert_fingerprint: str | None = None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, cert_reqs: int | str | None = None, assert_hostname: None | str | Literal[False] = None, assert_fingerprint: str | None = None, server_hostname: str | None = None, ssl_context: ssl.SSLContext | None = None, ca_certs: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ssl_minimum_version: int | None = None, ssl_maximum_version: int | None = None, ssl_version: int | str | None = None, # Deprecated cert_file: str | None = None, key_file: str | None = None, key_password: str | None = None, ) -> None: super().__init__( host, port=port, timeout=timeout, source_address=source_address, blocksize=blocksize, socket_options=socket_options, proxy=proxy, proxy_config=proxy_config, ) self.key_file = key_file self.cert_file = cert_file self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ssl_version = ssl_version self.ssl_minimum_version = ssl_minimum_version self.ssl_maximum_version = ssl_maximum_version self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data # cert_reqs depends on ssl_context so calculate last. if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.cert_reqs = cert_reqs def set_cert( self, key_file: str | None = None, cert_file: str | None = None, cert_reqs: int | str | None = None, key_password: str | None = None, ca_certs: str | None = None, assert_hostname: None | str | Literal[False] = None, assert_fingerprint: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ) -> None: """ This method should only be called once, before the connection is used. """ warnings.warn( "HTTPSConnection.set_cert() is deprecated and will be removed " "in urllib3 v2.1.0. Instead provide the parameters to the " "HTTPSConnection constructor.", category=DeprecationWarning, stacklevel=2, ) # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also # have an SSLContext object in which case we'll use its verify_mode. 
if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data def connect(self) -> None: sock: socket.socket | ssl.SSLSocket self.sock = sock = self._new_conn() server_hostname: str = self.host tls_in_tls = False # Do we need to establish a tunnel? if self._tunnel_host is not None: # We're tunneling to an HTTPS origin so need to do TLS-in-TLS. if self._tunnel_scheme == "https": self.sock = sock = self._connect_tls_proxy(self.host, sock) tls_in_tls = True # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True self._tunnel() # type: ignore[attr-defined] # Override the host with the one we're requesting data from. server_hostname = self._tunnel_host if self.server_hostname is not None: server_hostname = self.server_hostname is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn( ( f"System time is way off (before {RECENT_DATE}). This will probably " "lead to SSL verification errors" ), SystemTimeWarning, ) sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock=sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, server_hostname=server_hostname, ssl_context=self.ssl_context, tls_in_tls=tls_in_tls, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) self.sock = sock_and_verified.socket self.is_verified = sock_and_verified.is_verified # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket: """ Establish a TLS connection to the proxy using the provided SSL context. """ # `_connect_tls_proxy` is called when self._tunnel_host is truthy. 
proxy_config = typing.cast(ProxyConfig, self.proxy_config) ssl_context = proxy_config.ssl_context sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=hostname, ssl_context=ssl_context, assert_hostname=proxy_config.assert_hostname, assert_fingerprint=proxy_config.assert_fingerprint, # Features that aren't implemented for proxies yet: cert_file=None, key_file=None, key_password=None, tls_in_tls=False, ) self.proxy_is_verified = sock_and_verified.is_verified return sock_and_verified.socket # type: ignore[return-value] class _WrappedAndVerifiedSocket(typing.NamedTuple): """ Wrapped socket and whether the connection is verified after the TLS handshake """ socket: ssl.SSLSocket | SSLTransport is_verified: bool def _ssl_wrap_socket_and_match_hostname( sock: socket.socket, *, cert_reqs: None | str | int, ssl_version: None | str | int, ssl_minimum_version: int | None, ssl_maximum_version: int | None, cert_file: str | None, key_file: str | None, key_password: str | None, ca_certs: str | None, ca_cert_dir: str | None, ca_cert_data: None | str | bytes, assert_hostname: None | str | Literal[False], assert_fingerprint: str | None, server_hostname: str | None, ssl_context: ssl.SSLContext | None, tls_in_tls: bool = False, ) -> _WrappedAndVerifiedSocket: """Logic for constructing an SSLContext from all TLS parameters, passing that down into ssl_wrap_socket, and then doing certificate verification either via hostname or fingerprint. This function exists to guarantee that both proxies and targets have the same behavior when connecting via TLS. """ default_ssl_context = False if ssl_context is None: default_ssl_context = True context = create_urllib3_context( ssl_version=resolve_ssl_version(ssl_version), ssl_minimum_version=ssl_minimum_version, ssl_maximum_version=ssl_maximum_version, cert_reqs=resolve_cert_reqs(cert_reqs), ) else: context = ssl_context context.verify_mode = resolve_cert_reqs(cert_reqs) # In some cases, we want to verify hostnames ourselves if ( # `ssl` can't verify fingerprints or alternate hostnames assert_fingerprint or assert_hostname # assert_hostname can be set to False to disable hostname checking or assert_hostname is False # We still support OpenSSL 1.0.2, which prevents us from verifying # hostnames easily: https://github.com/pyca/pyopenssl/pull/933 or ssl_.IS_PYOPENSSL or not ssl_.HAS_NEVER_CHECK_COMMON_NAME ): context.check_hostname = False # Try to load OS default certs if none are given. We need to do the hasattr() check # for custom pyOpenSSL SSLContext objects because they don't support # load_default_certs(). if ( not ca_certs and not ca_cert_dir and not ca_cert_data and default_ssl_context and hasattr(context, "load_default_certs") ): context.load_default_certs() # Ensure that IPv6 addresses are in the proper format and don't have a # scope ID. Python's SSL module fails to recognize scoped IPv6 addresses # and interprets them as DNS hostnames. 
if server_hostname is not None: normalized = server_hostname.strip("[]") if "%" in normalized: normalized = normalized[: normalized.rfind("%")] if is_ipaddress(normalized): server_hostname = normalized ssl_sock = ssl_wrap_socket( sock=sock, keyfile=key_file, certfile=cert_file, key_password=key_password, ca_certs=ca_certs, ca_cert_dir=ca_cert_dir, ca_cert_data=ca_cert_data, server_hostname=server_hostname, ssl_context=context, tls_in_tls=tls_in_tls, ) try: if assert_fingerprint: _assert_fingerprint( ssl_sock.getpeercert(binary_form=True), assert_fingerprint ) elif ( context.verify_mode != ssl.CERT_NONE and not context.check_hostname and assert_hostname is not False ): cert: _TYPE_PEER_CERT_RET_DICT = ssl_sock.getpeercert() # type: ignore[assignment] # Need to signal to our match_hostname whether to use 'commonName' or not. # If we're using our own constructed SSLContext we explicitly set 'False' # because PyPy hard-codes 'True' from SSLContext.hostname_checks_common_name. if default_ssl_context: hostname_checks_common_name = False else: hostname_checks_common_name = ( getattr(context, "hostname_checks_common_name", False) or False ) _match_hostname( cert, assert_hostname or server_hostname, # type: ignore[arg-type] hostname_checks_common_name, ) return _WrappedAndVerifiedSocket( socket=ssl_sock, is_verified=context.verify_mode == ssl.CERT_REQUIRED or bool(assert_fingerprint), ) except BaseException: ssl_sock.close() raise def _match_hostname( cert: _TYPE_PEER_CERT_RET_DICT | None, asserted_hostname: str, hostname_checks_common_name: bool = False, ) -> None: # Our upstream implementation of ssl.match_hostname() # only applies this normalization to IP addresses so it doesn't # match DNS SANs so we do the same thing! stripped_hostname = asserted_hostname.strip("[]") if is_ipaddress(stripped_hostname): asserted_hostname = stripped_hostname try:
match_hostname(cert, asserted_hostname, hostname_checks_common_name)
29
2023-12-16 04:12:01+00:00
24k
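A note on the urllib3 record above: before TLS wrapping, `connect()` normalizes bracketed IPv6 literals (stripping `[]` and any `%scope` suffix), since Python's ssl module otherwise misreads scoped addresses as DNS hostnames. A minimal standalone sketch of that normalization — the function name is ours, and `ipaddress.ip_address` stands in for urllib3's `is_ipaddress()`:

from ipaddress import ip_address

def normalize_server_hostname(server_hostname: str) -> str:
    # Strip IPv6 brackets and any '%scope' suffix so ssl sees a bare address.
    normalized = server_hostname.strip("[]")
    if "%" in normalized:
        normalized = normalized[: normalized.rfind("%")]
    try:
        ip_address(normalized)  # stand-in for urllib3's is_ipaddress()
        return normalized       # IP literal: use the normalized form
    except ValueError:
        return server_hostname  # DNS name: leave untouched

assert normalize_server_hostname("[fe80::1%eth0]") == "fe80::1"
assert normalize_server_hostname("example.com") == "example.com"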
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,447
def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). # The conditional expression is parenthesized so the "Timecode List:" label is kept even when cut_list is empty. if include_cut_list: csv_writer.writerow( ["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) are used instead. css: String containing all the css information for the resulting html page.
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width: 100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, which coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or to perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least.
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between effective_width and 1.5 * effective_width pixels (e.g. if effective_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. start_pos: The start frame or FrameTimecode of the cut list, used to generate the first scene's start time. end_pos: The end frame, or FrameTimecode representing duration, of the video that was processed, used to generate the last scene's end time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame that each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated and has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed, as it will be # the start timecode for the first scene in the list.
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). # The conditional expression is parenthesized so the "Timecode List:" label is kept even when cut_list is empty. if include_cut_list: csv_writer.writerow( ["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) are used instead. css: String containing all the css information for the resulting html page.
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width: 100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start
row = SimpleTableRow([
2
2023-10-25 02:50:01+00:00
24k
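The compute_downscale_factor helper in the record above is pure integer arithmetic, so its behavior is easy to check in isolation. A self-contained copy with the default minimum width inlined (256, per DEFAULT_MIN_WIDTH in the source):

def compute_downscale_factor(frame_width: int, effective_width: int = 256) -> int:
    # Mirrors the record above: floor division, clamped to a minimum factor of 1.
    assert frame_width >= 1 and effective_width >= 1
    if frame_width < effective_width:
        return 1
    return frame_width // effective_width

assert compute_downscale_factor(1920) == 7   # 1920 // 256 -> ~274 px effective width
assert compute_downscale_factor(200) == 1    # already narrower than the target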
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n F...
from typing import List from sentence_transformers import SentenceTransformer from embedding_studio.core.config import settings from embedding_studio.core.plugin import FineTuningMethod from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import ( AWSS3ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.search_event import ( DummyEventType, SearchResult, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.clickstream.text_query_item import ( TextQueryItem, ) from embedding_studio.embeddings.data.clickstream.text_query_retriever import ( TextQueryRetriever, ) from embedding_studio.embeddings.data.loaders.s3.s3_loader import ( AWSS3DataLoader, ) from embedding_studio.embeddings.data.storages.producers.clip import ( CLIPItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import ( CosineProbMarginRankingLoss, ) from embedding_studio.embeddings.models.text_to_image.clip import ( TextToImageCLIPModel, ) from embedding_studio.models.clickstream.sessions import SessionWithEvents from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import ( INITIAL_PARAMS, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
16,533
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data", # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = {} self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser(
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data", # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = {} self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser(
TextQueryItem, SearchResult, DummyEventType
6
2023-10-31 00:33:13+00:00
24k
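The plugin above follows a meta-plus-builder registration pattern: a class attribute describing the plugin, and a constructor that wires up its components. A hedged, dependency-free sketch of that shape — PluginMeta and the meta values mirror the record, but the registry class itself is our assumption, not the library's API:

from dataclasses import dataclass

@dataclass
class PluginMeta:
    name: str
    version: str
    description: str

class PluginRegistry:
    def __init__(self) -> None:
        self._plugins: dict = {}

    def register(self, plugin) -> None:
        # Key plugins by their declared meta name, as the record's meta suggests.
        self._plugins[plugin.meta.name] = plugin

    def get(self, name: str):
        return self._plugins[name]

class DemoPlugin:
    meta = PluginMeta(name="Default Fine Tuning Method", version="0.0.1", description="demo")

registry = PluginRegistry()
registry.register(DemoPlugin())
assert registry.get("Default Fine Tuning Method").meta.version == "0.0.1"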
nv-tlabs/vid2player3d
uhc/utils/convert_amass_isaac.py
[ { "identifier": "SMPL_BONE_ORDER_NAMES", "path": "uhc/smpllib/smpl_parser.py", "snippet": "SMPL_BONE_ORDER_NAMES = [\n \"Pelvis\",\n \"L_Hip\",\n \"R_Hip\",\n \"Torso\",\n \"L_Knee\",\n \"R_Knee\",\n \"Spine\",\n \"L_Ankle\",\n \"R_Ankle\",\n \"Chest\",\n \"L_Toe\",\n ...
import joblib import numpy as np import os import sys import argparse import torch import yaml import ipdb from scipy.spatial.transform import Rotation as sRot from tqdm import tqdm from uhc.smpllib.smpl_parser import SMPL_BONE_ORDER_NAMES as joint_names from uhc.smpllib.smpl_local_robot import Robot as LocalRobot from embodied_pose.utils.motion_lib import MotionLib from poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
16,442
key = ",".join([f"{x:.6f}" for x in beta]) beta_mapping[key] = i print(f'AMASS data has {len(beta_mapping)} unique body shapes!') joblib.dump({'beta_arr': beta_arr, 'beta_mapping': beta_mapping}, f'{args.out_dir}/shape_data.pkl') robot_cfg = { "mesh": True, "model": "smpl", "body_params": {}, "joint_params": {}, "geom_params": {}, "actuator_params": {}, } model_xml_path = f"/tmp/smpl/smpl_mesh_humanoid_v1_convert.xml" smpl_local_robot = LocalRobot( robot_cfg, data_dir= "data/smpl", model_xml_path=model_xml_path ) mujoco_joint_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] smpl_2_mujoco = [ joint_names.index(q) for q in mujoco_joint_names if q in joint_names ] amass_full_motion_dict = {} sequences = np.array(list(amass_data.keys())) if num_seq is not None: sequences = sequences[:num_seq] seq_mapping = {seq_name.item(): seq_idx for seq_idx, seq_name in enumerate(sequences)} motion_lib_seq_arr = np.array_split(sequences, num_motion_libs) seq_name_splits = {} for i, seq_arr in enumerate(motion_lib_seq_arr): seq_name_splits[i] = [seq_name.item()[2:] for seq_name in seq_arr] joblib.dump(seq_name_splits, f'{args.out_dir}/seq_name_splits.pkl') for i, motion_lib_seqs in enumerate(tqdm(motion_lib_seq_arr)): motion_lib_input_dict = dict() for key_name in motion_lib_seqs: key_name = key_name.item() smpl_data_entry = amass_data[key_name] file_name = f"data/amass/singles/{key_name}.npy" seq_len = smpl_data_entry['pose_aa'].shape[0] pose_aa = smpl_data_entry['pose_aa'].copy() trans = smpl_data_entry['trans'].copy() beta = smpl_data_entry['beta'][:10].copy() gender = smpl_data_entry['gender'] fps = 30.0 if isinstance(gender, np.ndarray): gender = gender.item() if isinstance(gender, bytes): gender = gender.decode("utf-8") if gender == "neutral": gender_number = [0] smpl_parser = smpl_local_robot.smpl_parser_n elif gender == "male": gender_number = [1] smpl_parser = smpl_local_robot.smpl_parser_m elif gender == "female": gender_number = [2] smpl_parser = smpl_local_robot.smpl_parser_f else: ipdb.set_trace() raise Exception("Gender Not Supported!!") batch_size = pose_aa.shape[0] pose_aa = np.concatenate([pose_aa[:, :66], np.zeros((batch_size, 6))], axis=1) # TODO: need to extract correct handle rotations instead of zero pose_quat = sRot.from_rotvec(pose_aa.reshape(-1, 3)).as_quat().reshape(batch_size, 24, 4)[..., smpl_2_mujoco, :] smpl_local_robot.load_from_skeleton(betas=torch.from_numpy(beta[None, ]), gender=gender_number) smpl_local_robot.write_xml() skeleton_tree = SkeletonTree.from_mjcf(model_xml_path) root_trans = trans + skeleton_tree.local_translation[0].numpy() new_sk_state = SkeletonState.from_rotation_and_root_translation( skeleton_tree, torch.from_numpy(pose_quat), torch.from_numpy(root_trans), is_local=True) verts, joints = smpl_parser.get_joints_verts( pose=torch.from_numpy(pose_aa), th_betas=torch.from_numpy(beta[None, ]), th_trans=torch.from_numpy(trans) ) # min_verts_h = verts[..., 2].min().item() min_verts_h = verts[..., 2].min(dim=-1)[0].mean().item() beta_key = ",".join([f"{x:.6f}" for x in beta]) new_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=fps) new_motion_out = new_motion.to_dict() new_motion_out['seq_name'] = key_name new_motion_out['seq_idx'] = seq_mapping[key_name] new_motion_out['trans'] = trans new_motion_out['root_trans'] = root_trans 
new_motion_out['pose_aa'] = pose_aa new_motion_out['beta'] = beta new_motion_out['beta_idx'] = beta_mapping[beta_key] new_motion_out['gender'] = gender new_motion_out['min_verts_h'] = min_verts_h new_motion_out['body_scale'] = 1.0 new_motion_out['__name__'] = "SkeletonMotion" motion_lib_input_dict[key_name] = new_motion_out
sys.path.append(os.getcwd()) parser = argparse.ArgumentParser() parser.add_argument('--amass_data', type=str, default="data/amass/amass_copycat_take5_5.pkl") parser.add_argument('--out_dir', type=str, default="data/motion_lib/amass") parser.add_argument('--num_seq', type=int, default=None) parser.add_argument('--num_motion_libs', type=int, default=14) args = parser.parse_args() num_seq = args.num_seq num_motion_libs = args.num_motion_libs os.makedirs(args.out_dir, exist_ok=True) meta_data = { "amass_data": args.amass_data, "num_seq": num_seq, "num_motion_libs": num_motion_libs } yaml.safe_dump(meta_data, open(f'{args.out_dir}/args.yml', 'w')) amass_data = joblib.load(args.amass_data) info = joblib.load('data/misc/smpl_body_info.pkl') # body_shapes all_beta = [x['beta'][:10] for x in amass_data.values()] _, index = np.unique([",".join([f"{x:.6f}" for x in beta]) for beta in all_beta], return_index=True) index.sort() beta_arr = [all_beta[i] for i in index] beta_mapping = dict() for i, beta in enumerate(beta_arr): key = ",".join([f"{x:.6f}" for x in beta]) beta_mapping[key] = i print(f'AMASS data has {len(beta_mapping)} unique body shapes!') joblib.dump({'beta_arr': beta_arr, 'beta_mapping': beta_mapping}, f'{args.out_dir}/shape_data.pkl') robot_cfg = { "mesh": True, "model": "smpl", "body_params": {}, "joint_params": {}, "geom_params": {}, "actuator_params": {}, } model_xml_path = f"/tmp/smpl/smpl_mesh_humanoid_v1_convert.xml" smpl_local_robot = LocalRobot( robot_cfg, data_dir= "data/smpl", model_xml_path=model_xml_path ) mujoco_joint_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] smpl_2_mujoco = [ joint_names.index(q) for q in mujoco_joint_names if q in joint_names ] amass_full_motion_dict = {} sequences = np.array(list(amass_data.keys())) if num_seq is not None: sequences = sequences[:num_seq] seq_mapping = {seq_name.item(): seq_idx for seq_idx, seq_name in enumerate(sequences)} motion_lib_seq_arr = np.array_split(sequences, num_motion_libs) seq_name_splits = {} for i, seq_arr in enumerate(motion_lib_seq_arr): seq_name_splits[i] = [seq_name.item()[2:] for seq_name in seq_arr] joblib.dump(seq_name_splits, f'{args.out_dir}/seq_name_splits.pkl') for i, motion_lib_seqs in enumerate(tqdm(motion_lib_seq_arr)): motion_lib_input_dict = dict() for key_name in motion_lib_seqs: key_name = key_name.item() smpl_data_entry = amass_data[key_name] file_name = f"data/amass/singles/{key_name}.npy" seq_len = smpl_data_entry['pose_aa'].shape[0] pose_aa = smpl_data_entry['pose_aa'].copy() trans = smpl_data_entry['trans'].copy() beta = smpl_data_entry['beta'][:10].copy() gender = smpl_data_entry['gender'] fps = 30.0 if isinstance(gender, np.ndarray): gender = gender.item() if isinstance(gender, bytes): gender = gender.decode("utf-8") if gender == "neutral": gender_number = [0] smpl_parser = smpl_local_robot.smpl_parser_n elif gender == "male": gender_number = [1] smpl_parser = smpl_local_robot.smpl_parser_m elif gender == "female": gender_number = [2] smpl_parser = smpl_local_robot.smpl_parser_f else: ipdb.set_trace() raise Exception("Gender Not Supported!!") batch_size = pose_aa.shape[0] pose_aa = np.concatenate([pose_aa[:, :66], np.zeros((batch_size, 6))], axis=1) # TODO: need to extract correct handle rotations instead of zero pose_quat = sRot.from_rotvec(pose_aa.reshape(-1, 
3)).as_quat().reshape(batch_size, 24, 4)[..., smpl_2_mujoco, :] smpl_local_robot.load_from_skeleton(betas=torch.from_numpy(beta[None, ]), gender=gender_number) smpl_local_robot.write_xml() skeleton_tree = SkeletonTree.from_mjcf(model_xml_path) root_trans = trans + skeleton_tree.local_translation[0].numpy() new_sk_state = SkeletonState.from_rotation_and_root_translation( skeleton_tree, torch.from_numpy(pose_quat), torch.from_numpy(root_trans), is_local=True) verts, joints = smpl_parser.get_joints_verts( pose=torch.from_numpy(pose_aa), th_betas=torch.from_numpy(beta[None, ]), th_trans=torch.from_numpy(trans) ) # min_verts_h = verts[..., 2].min().item() min_verts_h = verts[..., 2].min(dim=-1)[0].mean().item() beta_key = ",".join([f"{x:.6f}" for x in beta]) new_motion = SkeletonMotion.from_skeleton_state(new_sk_state, fps=fps) new_motion_out = new_motion.to_dict() new_motion_out['seq_name'] = key_name new_motion_out['seq_idx'] = seq_mapping[key_name] new_motion_out['trans'] = trans new_motion_out['root_trans'] = root_trans new_motion_out['pose_aa'] = pose_aa new_motion_out['beta'] = beta new_motion_out['beta_idx'] = beta_mapping[beta_key] new_motion_out['gender'] = gender new_motion_out['min_verts_h'] = min_verts_h new_motion_out['body_scale'] = 1.0 new_motion_out['__name__'] = "SkeletonMotion" motion_lib_input_dict[key_name] = new_motion_out
motion_lib = MotionLib(motion_file=motion_lib_input_dict,
2
2023-10-30 20:43:43+00:00
24k
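The conversion script above deduplicates SMPL body shapes by formatting each beta vector into a fixed-precision string key and indexing the unique keys. The same trick in isolation, with toy two-element betas standing in for the real ten-element vectors:

import numpy as np

betas = [np.array([0.1, 0.2]), np.array([0.1, 0.2]), np.array([0.3, 0.4])]
keys = [",".join(f"{x:.6f}" for x in b) for b in betas]
_, index = np.unique(keys, return_index=True)  # first occurrence of each unique key
index.sort()                                   # restore original ordering
unique_betas = [betas[i] for i in index]
beta_mapping = {",".join(f"{x:.6f}" for x in b): i for i, b in enumerate(unique_betas)}

assert len(unique_betas) == 2
assert beta_mapping[keys[1]] == 0  # the duplicate shape maps back to the first entry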
masked-spacetime-hashing/msth
MSTH/datamanager.py
[ { "identifier": "CameraOptimizerConfig", "path": "nerfstudio/cameras/camera_optimizers.py", "snippet": "class CameraOptimizerConfig(InstantiateConfig):\n \"\"\"Configuration of optimization for camera poses.\"\"\"\n\n _target: Type = field(default_factory=lambda: CameraOptimizer)\n\n mode: Lite...
import random import torch import os from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple, Type, Union from typing import Literal, Callable from typing_extensions import Literal from pathlib import Path from rich.progress import Console from torch.nn.parameter import Parameter from torch.utils.data.dataloader import DataLoader from nerfstudio.cameras.camera_optimizers import CameraOptimizerConfig from nerfstudio.cameras.rays import RayBundle from nerfstudio.data.datamanagers.base_datamanager import DataManager, DataManagerConfig from nerfstudio.data.pixel_samplers import PixelSampler from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate from nerfstudio.model_components.ray_generators import RayGenerator from MSTH.dataparser import ( VideoDataParser, VideoDataParserConfig, VideoDataParserOutputs, ) from MSTH.dataset import VideoDataset, VideoDatasetWithFeature, VideoDatasetAllCached, VideoDatasetAllCachedUint8 from MSTH.sampler import ( CompletePixelSampler, CompletePixelSamplerIter, PixelTimeSampler, PixelTimeUniformSampler, spacetime_samplers, spacetime_samplers_default_args, PixelTimeUniformSampler_origin, SpatioTemporalSampler, ) from MSTH.utils import Timer
19,872
try: from typing import Literal # try/except bodies reconstructed here: both Literal imports appear in this record's import list, so the elided bodies were almost certainly these except ImportError: from typing_extensions import Literal # from MSTH.dataset import EvalVideoDataset, VideoDataset CONSOLE = Console(width=120) @dataclass class VideoDataManagerConfig(DataManagerConfig): """Video Data Manager config""" _target: Type = field(default_factory=lambda: VideoDataManager) dataparser: VideoDataParserConfig = VideoDataParserConfig() collate_fn = staticmethod(nerfstudio_collate) """Specifies the collate function to use for the train and eval dataloaders.""" camera_res_scale_factor: float = 1.0 train_num_rays_per_batch: int = 1024 eval_num_rays_per_batch: int = 1024 camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig() mask_extend_radius: int = 5 """mask extend radius for gaussian filter""" next_n_frames: int = 1 class VideoDataManager(DataManager): train_dataset: VideoDataset eval_dataset: VideoDataset
try: from typing import Literal # try/except bodies reconstructed here: both Literal imports appear in this record's import list, so the elided bodies were almost certainly these except ImportError: from typing_extensions import Literal # from MSTH.dataset import EvalVideoDataset, VideoDataset CONSOLE = Console(width=120) @dataclass class VideoDataManagerConfig(DataManagerConfig): """Video Data Manager config""" _target: Type = field(default_factory=lambda: VideoDataManager) dataparser: VideoDataParserConfig = VideoDataParserConfig() collate_fn = staticmethod(nerfstudio_collate) """Specifies the collate function to use for the train and eval dataloaders.""" camera_res_scale_factor: float = 1.0 train_num_rays_per_batch: int = 1024 eval_num_rays_per_batch: int = 1024 camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig() mask_extend_radius: int = 5 """mask extend radius for gaussian filter""" next_n_frames: int = 1 class VideoDataManager(DataManager): train_dataset: VideoDataset eval_dataset: VideoDataset
train_dataparser_outputs: VideoDataParserOutputs
9
2023-10-26 04:39:15+00:00
24k
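The VideoDataManagerConfig above uses the nerfstudio-style `_target` field so a config object can instantiate its own manager class. A minimal sketch of that pattern — class names mirror the record, but this `setup` helper is our stand-in for the framework's instantiation hook:

from dataclasses import dataclass, field
from typing import Type

class VideoDataManager:
    def __init__(self, config: "VideoDataManagerConfig") -> None:
        self.config = config

@dataclass
class VideoDataManagerConfig:
    _target: Type = field(default_factory=lambda: VideoDataManager)
    train_num_rays_per_batch: int = 1024

    def setup(self) -> "VideoDataManager":
        # Instantiate whichever class the config points at.
        return self._target(self)

manager = VideoDataManagerConfig(train_num_rays_per_batch=2048).setup()
assert manager.config.train_num_rays_per_batch == 2048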
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kw...
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
19,977
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, )
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, )
normalizer = Normalizer(
8
2023-10-30 02:53:38+00:00
24k
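The PROCESS_FUNC / FILE_TYPE / LR_SCHEDULER tables in the record above are plain dict dispatch keyed on a `process_type` string. A small sketch of how a dataset path could be resolved from one of them — FILE_TYPE is copied from the source, while the helper function and its name are hypothetical:

FILE_TYPE = {"QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl"}

def dataset_file(process_type: str, stem: str) -> str:
    # Fail loudly on unknown keys instead of raising a bare KeyError downstream.
    if process_type not in FILE_TYPE:
        raise ValueError(f"unknown process_type: {process_type!r}")
    return stem + FILE_TYPE[process_type]

assert dataset_file("TS1x", "data/ts1x_train") == "data/ts1x_train.pkl"
assert dataset_file("QM9", "data/qm9") == "data/qm9.npz"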
nv-tlabs/pacer
scripts/vis_egoquest_sp.py
[ { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the fo...
import os import argparse import numpy as np import scenepic as sp import torch import cv2 import joblib from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from tqdm import tqdm from scipy.spatial.transform import Rotation as sRot from poselib.poselib.skeleton.skeleton3d import SkeletonTree,SkeletonMotion, SkeletonState from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names
21,232
"""Example script demonstrating the basic ScenePic functionality.""" mujoco_joint_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] Name = "getting_started" Title = "Getting Started" data_dir = "data/smpl" smpl_parser_n = SMPL_Parser(model_path=data_dir,gender="neutral") smpl_parser_m = SMPL_Parser(model_path=data_dir,gender="male") smpl_parser_f = SMPL_Parser(model_path=data_dir,gender="female") # texture_path ="/hdd/zen/data/SURREAL/smpl_data/" # texture_image = cv2.imread("/hdd/zen/data/SURREAL/smpl_data/textures/male/nongrey_male_0550.jpg") pkl_dir = "output/renderings/smpl_ego_4_1-2022-10-05-20:51:40.pkl" Name = pkl_dir.split("/")[-1].split(".")[0] pkl_data = joblib.load(pkl_dir) mujoco_2_smpl = [ mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names ] def build_scene() -> sp.Scene: scene = sp.Scene() scene.framerate = 30 base_size = 600 num_per_row = 4 items = list(pkl_data.items()) # num_items = 4 num_items = len(items) for entry_key, data_seq in items[:num_items]: main = scene.create_canvas_3d(width=base_size, height=base_size, canvas_id=entry_key) gender, beta = data_seq['betas'][0], data_seq['betas'][1:] if gender == 0: smpl_parser = smpl_parser_n humanoid_color = np.array([[0, 1, 100]]).repeat(6890, axis=0) elif gender == 1: smpl_parser = smpl_parser_m humanoid_color = np.array([[0, 0.75, 1]]).repeat(6890, axis=0) else: smpl_parser = smpl_parser_f humanoid_color = np.array([[0.8, 0.15, 0.15]]).repeat(6890, axis=0) ground = scene.create_mesh("ground") ground.add_quad(color=sp.Colors.Gray, p0=np.array([-50, -50, 0]), p1=np.array([50, -50, 0]), p2=np.array([50, 50, 0]), p3=np.array([-50, 50, 0]), normal =np.array([0, 0, 1]) ) ref_jt_pos_full = data_seq['body_pos_full'].numpy()[::2] skeleon_motion = SkeletonMotion.from_dict(data_seq) offset = skeleon_motion.skeleton_tree.local_translation[0] global_rot = skeleon_motion.global_rotation B, J, N = global_rot.shape pose_quat_global = (sRot.from_quat(global_rot.reshape(-1, 4).numpy()) * sRot.from_quat([0.5, 0.5, 0.5, 0.5])).as_quat().reshape(B, -1, 4)[::2] # downsample to 30 fps B_down = pose_quat_global.shape[0] body_trans = skeleon_motion.global_translation[::2] root_trans = body_trans[:, 0] root_trans_offset = root_trans - offset
"""Example script demonstrating the basic ScenePic functionality.""" mujoco_joint_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] Name = "getting_started" Title = "Getting Started" data_dir = "data/smpl" smpl_parser_n = SMPL_Parser(model_path=data_dir,gender="neutral") smpl_parser_m = SMPL_Parser(model_path=data_dir,gender="male") smpl_parser_f = SMPL_Parser(model_path=data_dir,gender="female") # texture_path ="/hdd/zen/data/SURREAL/smpl_data/" # texture_image = cv2.imread("/hdd/zen/data/SURREAL/smpl_data/textures/male/nongrey_male_0550.jpg") pkl_dir = "output/renderings/smpl_ego_4_1-2022-10-05-20:51:40.pkl" Name = pkl_dir.split("/")[-1].split(".")[0] pkl_data = joblib.load(pkl_dir) mujoco_2_smpl = [ mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names ] def build_scene() -> sp.Scene: scene = sp.Scene() scene.framerate = 30 base_size = 600 num_per_row = 4 items = list(pkl_data.items()) # num_items = 4 num_items = len(items) for entry_key, data_seq in items[:num_items]: main = scene.create_canvas_3d(width=base_size, height=base_size, canvas_id=entry_key) gender, beta = data_seq['betas'][0], data_seq['betas'][1:] if gender == 0: smpl_parser = smpl_parser_n humanoid_color = np.array([[0, 1, 100]]).repeat(6890, axis=0) elif gender == 1: smpl_parser = smpl_parser_m humanoid_color = np.array([[0, 0.75, 1]]).repeat(6890, axis=0) else: smpl_parser = smpl_parser_f humanoid_color = np.array([[0.8, 0.15, 0.15]]).repeat(6890, axis=0) ground = scene.create_mesh("ground") ground.add_quad(color=sp.Colors.Gray, p0=np.array([-50, -50, 0]), p1=np.array([50, -50, 0]), p2=np.array([50, 50, 0]), p3=np.array([-50, 50, 0]), normal =np.array([0, 0, 1]) ) ref_jt_pos_full = data_seq['body_pos_full'].numpy()[::2] skeleon_motion = SkeletonMotion.from_dict(data_seq) offset = skeleon_motion.skeleton_tree.local_translation[0] global_rot = skeleon_motion.global_rotation B, J, N = global_rot.shape pose_quat_global = (sRot.from_quat(global_rot.reshape(-1, 4).numpy()) * sRot.from_quat([0.5, 0.5, 0.5, 0.5])).as_quat().reshape(B, -1, 4)[::2] # downsample to 30 fps B_down = pose_quat_global.shape[0] body_trans = skeleon_motion.global_translation[::2] root_trans = body_trans[:, 0] root_trans_offset = root_trans - offset
new_sk_state = SkeletonState.from_rotation_and_root_translation(
5
2023-10-31 20:47:12+00:00
24k
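The visualization script above right-multiplies every global body rotation by the constant quaternion [0.5, 0.5, 0.5, 0.5] (a 120° rotation about (1, 1, 1) that cyclically permutes the axes) before rendering. The same batched composition with scipy, checked on identity inputs — the toy shapes here are ours, only the constant quaternion comes from the source:

import numpy as np
from scipy.spatial.transform import Rotation as sRot

global_rot = np.tile([0.0, 0.0, 0.0, 1.0], (4, 24, 1))  # (B, J, 4) identity quats
B = global_rot.shape[0]
fixed = (sRot.from_quat(global_rot.reshape(-1, 4))
         * sRot.from_quat([0.5, 0.5, 0.5, 0.5])).as_quat().reshape(B, -1, 4)

assert fixed.shape == (4, 24, 4)
# identity composed with q is q itself
np.testing.assert_allclose(fixed[0, 0], [0.5, 0.5, 0.5, 0.5])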
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n...
import numpy as np import torch import dexenv from gym.utils import seeding from isaacgym import gymapi from loguru import logger from tqdm import tqdm from dexenv.envs.dclaw_base import DClawBase from dexenv.utils.common import chunker_list from dexenv.utils.common import get_all_files_with_name from dexenv.utils.common import load_from_pickle from dexenv.utils.isaac_utils import load_a_goal_object_asset from dexenv.utils.isaac_utils import load_an_object_asset from dexenv.utils.isaac_utils import load_obj_texture
15,615
# add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from 
{start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset) goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False) ptd = None if self.cfg.env.loadCADPTD: ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl') if ptd_file.exists(): ptd = load_from_pickle(ptd_file) out.append(obj_asset) out.append(goal_obj_asset) if self.cfg.obj.load_texture:
class DclawMultiObjs(DClawBase): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.set_random_gen() self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset) self.num_objects = len(self.object_urdfs) logger.info(f'Object urdf root path:{self.dataset_path}.') logger.info(f'Number of available objects:{self.num_objects}.') super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) def set_random_gen(self, seed=12345): self.np_random, seed = seeding.np_random(seed) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) # load manipulated object and goal assets table_asset = self.get_table_asset() table_pose = self.get_table_pose() object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() # create fingertip force sensors, if needed if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) 
self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + 
self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset) goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False) ptd = None if self.cfg.env.loadCADPTD: ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl') if ptd_file.exists(): ptd = load_from_pickle(ptd_file) out.append(obj_asset) out.append(goal_obj_asset) if self.cfg.obj.load_texture:
texture_handle = load_obj_texture(self.gym, self.sim, object_urdf)
6
2023-10-25 17:22:41+00:00
24k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/activation_resampler/tests/test_activation_resampler.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then p...
from jaxtyping import Float, Int64 from torch import Tensor from torch.nn import Parameter from sparse_autoencoder.activation_resampler.activation_resampler import ActivationResampler from sparse_autoencoder.activation_store.base_store import ActivationStore from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss from sparse_autoencoder.loss.reducer import LossReducer from sparse_autoencoder.tensor_types import Axis import pytest import torch
16,542
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder( SparseAutoencoderConfig( n_input_features=DEFAULT_N_INPUT_FEATURES, n_learned_features=DEFAULT_N_LEARNED_FEATURES, n_components=DEFAULT_N_COMPONENTS, ) ) @pytest.fixture() def loss_fn() -> LossReducer: """Loss function fixture."""
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder( SparseAutoencoderConfig( n_input_features=DEFAULT_N_INPUT_FEATURES, n_learned_features=DEFAULT_N_LEARNED_FEATURES, n_components=DEFAULT_N_COMPONENTS, ) ) @pytest.fixture() def loss_fn() -> LossReducer: """Loss function fixture."""
return LossReducer(LearnedActivationsL1Loss(0.01), L2ReconstructionLoss())
6
2023-10-27 07:37:15+00:00
24k
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.paddi...
import os,gc import json import argparse import random import numpy as np import pandas as pd import wandb import torch import proteinnpt,baselines,utils from collections import defaultdict from proteinnpt.model import ProteinNPTModel from baselines.model import AugmentedPropertyPredictor from utils.esm.data import Alphabet from utils.tranception.model_pytorch import get_tranception_tokenizer from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr from utils.msa_utils import process_MSA from utils.model_utils import Trainer
20,530
else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} for target_name in target_names: print("Spearman {} target: {}".format(target_name,spearmans[target_name])) test_logs['Test Spearman '+target_name] = spearmans[target_name] if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: 
header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], 
all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + 
str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT":
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} for target_name in target_names: print("Spearman {} target: {}".format(target_name,spearmans[target_name])) test_logs['Test Spearman '+target_name] = spearmans[target_name] if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def 
log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. 
target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. 
args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT":
model = ProteinNPTModel(args, alphabet)
0
2023-10-28 11:41:05+00:00
24k
CVHub520/yolov5_obb
val.py
[ { "identifier": "poly2hbb", "path": "utils/rboxs_utils.py", "snippet": "def poly2hbb(polys):\n \"\"\"\n Trans poly format to hbb format\n Args:\n rboxes (array/tensor): (num_gts, poly) \n\n Returns:\n hbboxes (array/tensor): (num_gts, [xc yc w h]) \n \"\"\"\n assert polys...
import argparse import json import os import sys import numpy as np import torch from pathlib import Path from threading import Thread from tqdm import tqdm from utils.rboxs_utils import poly2hbb, rbox2poly from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, scale_polys, xywh2xyxy, xyxy2xywh, non_max_suppression_obb) from utils.metrics import ConfusionMatrix, ap_per_class from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
15,157
tbox = xywh2xyxy(poly2hbb(tpoly)) # target hbb boxes [xyxy] scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labels_hbbn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels (n, [cls xyxy]) correct = process_batch(pred_hbbn, labels_hbbn, iouv) if plots: confusion_matrix.process_batch(pred_hbbn, labels_hbbn) else: correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred_poly[:, 8].cpu(), pred_poly[:, 9].cpu(), tcls)) # (correct, conf, pcls, tcls) # Save/log if save_txt: # just save hbb pred results! save_one_txt(pred_hbbn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) # LOGGER.info('The horizontal prediction results has been saved in txt, which format is [cls cx cy w h /conf/]') if save_json: # save hbb pred results and poly pred results. save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map) # append to COCO-JSON dictionary # LOGGER.info('The hbb and obb results has been saved in json file') callbacks.run('on_val_image_end', pred_hbb, pred_hbbn, path, names, im[si]) # Plot images if plots and batch_i < 3: f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start() f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start() # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class else: nt = torch.zeros(1) # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) callbacks.run('on_val_end') # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_obb_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) LOGGER.info('---------------------The hbb and obb results has been saved in json file-----------------------') try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb check_requirements(['pycocotools']) anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/DroneVehicle_poly.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=8, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=1024, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.4, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # def save_one_json(predn, jdict, path, class_map): def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map): """ Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]} Args: pred_hbbn (tensor): (n, [poly, conf, cls]) pred_polyn (tensor): (n, [xyxy, conf, cls]) """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(pred_hbbn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred_polyn.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': class_map[int(p[-1]) + 1], # COCO's category_id start from 1, not 0 'bbox': [round(x, 1) for x in b], 'score': round(p[-2], 5), 'poly': [round(x, 1) for x in p[:8]], 'file_name': path.stem}) def process_batch(detections, labels, iouv): """ Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] matches = torch.Tensor(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @torch.no_grad() def run(data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.01, # confidence threshold iou_thres=0.4, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 device = torch.device('cpu') LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends') # Data data = check_dataset(data) # check # Configure model.eval() is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} # Dataloader if not training: model.warmup(imgsz=(1, 3, imgsz, imgsz), half=half) # warmup pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, names, single_cls, pad=pad, rect=pt, workers=workers, prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'HBBmAP@.5', ' HBBmAP@.5:.95') dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 # loss = torch.zeros(3, device=device) loss = torch.zeros(4, device=device) jdict, stats, ap, ap_class = [], [], [], [] pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): # targets (tensor): (n_gt_all_batch, [img_index clsid cx cy l s theta 
gaussian_θ_labels]) θ ∈ [-pi/2, pi/2) # shapes (tensor): (b, [(h_raw, w_raw), (hw_ratios, wh_paddings)]) t1 = time_sync() if pt or jit or engine: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width t2 = time_sync() dt[0] += t2 - t1 # Inference out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs dt[1] += time_sync() - t2 # Loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls, theta # NMS # targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t3 = time_sync() # out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) out = non_max_suppression_obb(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) # list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) dt[2] += time_sync() - t3 # Metrics for si, pred in enumerate(out): # pred (tensor): (n, [xylsθ, conf, cls]) labels = targets[targets[:, 0] == si, 1:7] # labels (tensor):(n_gt, [clsid cx cy l s theta]) θ[-pi/2, pi/2) nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path, shape = Path(paths[si]), shapes[si][0] # shape (tensor): (h_raw, w_raw) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions if single_cls: # pred[:, 5] = 0 pred[:, 6] = 0 poly = rbox2poly(pred[:, :5]) # (n, 8) pred_poly = torch.cat((poly, pred[:, -2:]), dim=1) # (n, [poly, conf, cls]) hbbox = xywh2xyxy(poly2hbb(pred_poly[:, :8])) # (n, [x1 y1 x2 y2]) pred_hbb = torch.cat((hbbox, pred_poly[:, -2:]), dim=1) # (n, [xyxy, conf, cls]) pred_polyn = pred_poly.clone() # predn (tensor): (n, [poly, conf, cls]) scale_polys(im[si].shape[1:], pred_polyn[:, :8], shape, shapes[si][1]) # native-space pred hbboxn = xywh2xyxy(poly2hbb(pred_polyn[:, :8])) # (n, [x1 y1 x2 y2]) pred_hbbn = torch.cat((hbboxn, pred_polyn[:, -2:]), dim=1) # (n, [xyxy, conf, cls]) native-space pred # Evaluate if nl: # tbox = xywh2xyxy(labels[:, 1:5]) # target boxes tpoly = rbox2poly(labels[:, 1:6]) # target poly tbox = xywh2xyxy(poly2hbb(tpoly)) # target hbb boxes [xyxy] scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labels_hbbn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels (n, [cls xyxy]) correct = process_batch(pred_hbbn, labels_hbbn, iouv) if plots: confusion_matrix.process_batch(pred_hbbn, labels_hbbn) else: correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred_poly[:, 8].cpu(), pred_poly[:, 9].cpu(), tcls)) # (correct, conf, pcls, tcls) # Save/log if save_txt: # just save hbb pred results! save_one_txt(pred_hbbn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) # LOGGER.info('The horizontal prediction results has been saved in txt, which format is [cls cx cy w h /conf/]') if save_json: # save hbb pred results and poly pred results. 
save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map) # append to COCO-JSON dictionary # LOGGER.info('The hbb and obb results has been saved in json file') callbacks.run('on_val_image_end', pred_hbb, pred_hbbn, path, names, im[si]) # Plot images if plots and batch_i < 3: f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start() f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start() # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class else: nt = torch.zeros(1) # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) callbacks.run('on_val_end') # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_obb_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) LOGGER.info('---------------------The hbb and obb results has been saved in json file-----------------------') try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb check_requirements(['pycocotools']) anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/DroneVehicle_poly.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=8, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=1024, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.4, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid
print_args(FILE.stem, opt)
5
2023-10-31 06:06:41+00:00
24k
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @a...
from typing import Union from lightphe.models.Homomorphic import Homomorphic from lightphe.models.Algorithm import Algorithm from lightphe.cryptosystems.RSA import RSA from lightphe.cryptosystems.ElGamal import ElGamal from lightphe.cryptosystems.Paillier import Paillier from lightphe.cryptosystems.DamgardJurik import DamgardJurik from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama from lightphe.cryptosystems.Benaloh import Benaloh from lightphe.cryptosystems.NaccacheStern import NaccacheStern from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal from lightphe.commons import phe_utils from lightphe.commons.logger import Logger
17,571
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys)
elif algorithm_name == Algorithm.Benaloh:
7
2023-10-28 14:57:59+00:00
24k
chenran-li/RQL-release
stable_baselines3/dqn_ME/dqn_ME.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation ...
import warnings import numpy as np import torch as th from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union from gym import spaces from torch.nn import functional as F from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.preprocessing import maybe_transpose from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_linear_fn, get_parameters_by_name, is_vectorized_observation, polyak_update from stable_baselines3.dqn_ME.policies_ME import CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy from stable_baselines3.dqn.dqn import DQN
16,038
:param exploration_final_eps: final value of random action probability :param max_grad_norm: The maximum value for the gradient clipping :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "CnnPolicy": CnnPolicy, "MultiInputPolicy": MultiInputPolicy, } def __init__( self, policy: Union[str, Type[DQNPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 1e-4, buffer_size: int = 1_000_000, # 1e6 learning_starts: int = 50000, batch_size: int = 32, tau: float = 1.0, gamma: float = 0.99, train_freq: Union[int, Tuple[int, str]] = 4, gradient_steps: int = 1, replay_buffer_class: Optional[Type[ReplayBuffer]] = None, replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, target_update_interval: int = 10000, exploration_fraction: float = 0.1, exploration_initial_eps: float = 1.0, exploration_final_eps: float = 0.05, max_grad_norm: float = 10, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate, buffer_size, learning_starts, batch_size, tau, gamma, train_freq, gradient_steps, replay_buffer_class=replay_buffer_class, replay_buffer_kwargs=replay_buffer_kwargs, optimize_memory_usage=optimize_memory_usage, target_update_interval=target_update_interval, exploration_fraction=exploration_fraction, exploration_initial_eps=exploration_initial_eps, exploration_final_eps=exploration_final_eps, max_grad_norm=max_grad_norm, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, seed=seed, device=device, _init_setup_model=_init_setup_model, ) def train(self, gradient_steps: int, batch_size: int = 100) -> None: # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update learning rate according to schedule self._update_learning_rate(self.policy.optimizer) losses = [] for _ in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) with th.no_grad(): # Compute the next Q-values using the target network next_q_values = self.q_net_target(replay_data.next_observations) # Compute the next soft value function by taking the log-sum-exp of the next Q-values next_q_values = th.logsumexp(next_q_values, 1) # Avoid potential broadcast issue next_q_values = next_q_values.reshape(-1, 1) # 1-step TD target target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates current_q_values = self.q_net(replay_data.observations) # Retrieve the q-values for the actions from the replay buffer current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) # Compute Huber loss (less sensitive to outliers) loss = 
F.smooth_l1_loss(current_q_values, target_q_values) losses.append(loss.item()) # Optimize the policy self.policy.optimizer.zero_grad() loss.backward() # Clip gradient norm th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) self.policy.optimizer.step() # Increase update counter self._n_updates += gradient_steps self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard") self.logger.record("train/loss", np.mean(losses)) def learn( self: SelfDQN_ME, total_timesteps: int,
SelfDQN_ME = TypeVar("SelfDQN_ME", bound="DQN_ME") class DQN_ME(DQN): """ Soft Deep Q-Network (i.e. entropy-regularized DQN) Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236, https://arxiv.org/abs/1702.08165 Default hyperparameters are taken from the Nature paper, except for the optimizer and learning rate that were taken from Stable Baselines defaults. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: The learning rate, it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param target_update_interval: update the target network every ``target_update_interval`` environment steps. :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced :param exploration_initial_eps: initial value of random action probability :param exploration_final_eps: final value of random action probability :param max_grad_norm: The maximum value for the gradient clipping :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. 
:param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "CnnPolicy": CnnPolicy, "MultiInputPolicy": MultiInputPolicy, } def __init__( self, policy: Union[str, Type[DQNPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 1e-4, buffer_size: int = 1_000_000, # 1e6 learning_starts: int = 50000, batch_size: int = 32, tau: float = 1.0, gamma: float = 0.99, train_freq: Union[int, Tuple[int, str]] = 4, gradient_steps: int = 1, replay_buffer_class: Optional[Type[ReplayBuffer]] = None, replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, target_update_interval: int = 10000, exploration_fraction: float = 0.1, exploration_initial_eps: float = 1.0, exploration_final_eps: float = 0.05, max_grad_norm: float = 10, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate, buffer_size, learning_starts, batch_size, tau, gamma, train_freq, gradient_steps, replay_buffer_class=replay_buffer_class, replay_buffer_kwargs=replay_buffer_kwargs, optimize_memory_usage=optimize_memory_usage, target_update_interval=target_update_interval, exploration_fraction=exploration_fraction, exploration_initial_eps=exploration_initial_eps, exploration_final_eps=exploration_final_eps, max_grad_norm=max_grad_norm, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, seed=seed, device=device, _init_setup_model=_init_setup_model, ) def train(self, gradient_steps: int, batch_size: int = 100) -> None: # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update learning rate according to schedule self._update_learning_rate(self.policy.optimizer) losses = [] for _ in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) with th.no_grad(): # Compute the next Q-values using the target network next_q_values = self.q_net_target(replay_data.next_observations) # Compute the next soft value function by taking the log-sum-exp of the next Q-values next_q_values = th.logsumexp(next_q_values, 1) # Avoid potential broadcast issue next_q_values = next_q_values.reshape(-1, 1) # 1-step TD target target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates current_q_values = self.q_net(replay_data.observations) # Retrieve the q-values for the actions from the replay buffer current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) # Compute Huber loss (less sensitive to outliers) loss = F.smooth_l1_loss(current_q_values, target_q_values) losses.append(loss.item()) # Optimize the policy self.policy.optimizer.zero_grad() loss.backward() # Clip gradient norm th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) self.policy.optimizer.step() # Increase update counter self._n_updates += gradient_steps self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard") self.logger.record("train/loss", np.mean(losses)) def learn( self: SelfDQN_ME, total_timesteps: int,
callback: MaybeCallback = None,
4
2023-10-28 01:09:21+00:00
24k
hyperspy/exspy
exspy/tests/signals/test_kramers_kronig_transform.py
[ { "identifier": "VolumePlasmonDrude", "path": "exspy/components/volume_plasmon_drude.py", "snippet": "class VolumePlasmonDrude(hs.model.components1D.Expression):\n r\"\"\"\n Drude volume plasmon energy loss function component, the energy loss\n function is defined as:\n\n .. math::\n\n ...
import numpy as np import pytest import hyperspy.api as hs from hyperspy.components1d import Lorentzian from exspy.components import VolumePlasmonDrude from exspy.misc.eels.tools import eels_constant from exspy.signals import EELSSpectrum
20,793
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. class Test2D: def setup_method(self, method): """To test the kramers_kronig_analysis we will generate 3 EELSSpectrum instances. First a model energy loss function(ELF), in our case following the Drude bulk plasmon peak. Second, we simulate the inelastic scattering to generate a model scattering distribution (SPC). Finally, we use a lorentzian peak with integral equal to 1 to simulate a ZLP. """ # Parameters i0 = 1.0 t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3))) t = t.transpose(signal_axes=0) scale = 0.02 # Create an 3x2x2048 spectrum with Drude plasmon s = EELSSpectrum(np.zeros((2, 3, 2 * 2048))) s.set_microscope_parameters( beam_energy=300.0, convergence_angle=5, collection_angle=10.0 ) s.axes_manager.signal_axes[0].scale = scale
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. class Test2D: def setup_method(self, method): """To test the kramers_kronig_analysis we will generate 3 EELSSpectrum instances. First a model energy loss function(ELF), in our case following the Drude bulk plasmon peak. Second, we simulate the inelastic scattering to generate a model scattering distribution (SPC). Finally, we use a lorentzian peak with integral equal to 1 to simulate a ZLP. """ # Parameters i0 = 1.0 t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3))) t = t.transpose(signal_axes=0) scale = 0.02 # Create an 3x2x2048 spectrum with Drude plasmon s = EELSSpectrum(np.zeros((2, 3, 2 * 2048))) s.set_microscope_parameters( beam_energy=300.0, convergence_angle=5, collection_angle=10.0 ) s.axes_manager.signal_axes[0].scale = scale
k = eels_constant(s, i0, t)
1
2023-10-28 20:04:10+00:00
24k
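The test in the record above builds its model energy loss function from a Drude bulk plasmon. For reference, the Drude dielectric function eps(E) = 1 - Ep^2 / (E^2 + i*E*dE) gives the closed-form loss function sketched below; the plasmon energy and damping values are illustrative, not the ones the test fits.

import numpy as np

E = np.arange(0.02, 40.0, 0.02)   # energy-loss axis in eV, 0.02 eV per channel as in the test
Ep, dE = 15.0, 1.5                # plasmon energy and damping (illustrative values)

# Drude energy loss function: Im[-1/eps] = Ep^2 * E * dE / ((E^2 - Ep^2)^2 + (E*dE)^2)
elf = Ep**2 * E * dE / ((E**2 - Ep**2) ** 2 + (E * dE) ** 2)
assert abs(E[np.argmax(elf)] - Ep) < dE   # the peak sits near the plasmon energy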
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n ...
import argparse import os import logging import yaml import numpy as np import random import time import datetime import json import math import torch import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn import torch.distributed as dist import socket from pathlib import Path from functools import partial from sklearn.metrics import roc_auc_score from collections import OrderedDict from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from transformers import AutoModel,BertConfig,AutoTokenizer from factory import utils from scheduler import create_scheduler from optim import create_optimizer from engine.train import train,valid_on_cheXpert,valid_on_chestxray14 from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2 from models.tokenization_bert import BertTokenizer from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset from io import BytesIO
16,967
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet':
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet':
image_encoder = ModelRes(res_base_model='resnet50').cuda()
7
2023-10-30 00:24:16+00:00
24k
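The DataLoaders in the record above all pass worker_init_fn=utils.seed_worker alongside the rank-offset base seed. The body of that helper is not shown in the record; the sketch below follows the standard PyTorch recipe for deterministic per-worker seeding and is only an assumption about what utils.seed_worker does.

import random
import numpy as np
import torch

def seed_worker(worker_id: int) -> None:
    # Each DataLoader worker derives its own deterministic seed from the base torch seed,
    # so numpy/random augmentations differ across workers but are reproducible across runs.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)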
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strin...
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
15,138
f"stored data hash {data_hash} with commitment: {synapse.commitment}" ) # Don't send data back, no need. synapse.encrypted_data = base64.b64encode(b"").decode() # Empty b64 response return synapse async def challenge( self, synapse: storage.protocol.Challenge ) -> storage.protocol.Challenge: """ Handles a data challenge by providing cryptographic proof of data possession. This method retrieves the specified data from storage, calculates its commitment using elliptic curve cryptography, and constructs a Merkle proof. The response includes the requested data chunk, Merkle proof, root, and the commitment, which collectively serve as verifiable evidence of data possession. Args: synapse (storage.protocol.Challenge): An object representing the challenge request, which includes parameters such as the hash of the data to retrieve, chunk size, challenge index, and elliptic curve parameters for commitment calculation. Returns: storage.protocol.Challenge: The synapse object is updated with the response to the challenge, including the encrypted data chunk, commitment point, Merkle proof, and root hash. The method performs the following steps: 1. Fetches the encrypted data from storage using the hash provided in the challenge. 2. Splits the data into chunks based on the specified chunk size. 3. Computes a new commitment hash to provide a time-bound proof of possession. 4. Generates a Merkle tree from the committed data chunks and extracts a proof for the requested chunk. 5. Encodes the requested chunk and Merkle proof in base64 for transmission. 6. Updates the challenge synapse with the commitment, data chunk, randomness, and Merkle proof. 7. Records the updated commitment hash in storage for future challenges. This method ensures data integrity and allows the verification of data possession without disclosing the entire data set. It is designed to fulfill data verification requests in a secure and verifiable manner. Example usage: Assuming an initialized 'synapse' object with the challenge parameters: >>> updated_synapse = self.challenge(synapse) """ # Retrieve the data itself from miner storage bt.logging.info(f"received challenge hash: {synapse.challenge_hash}") self.request_count += 1 bt.logging.trace(f"entering get_chunk_metadata()") data = await get_chunk_metadata(self.database, synapse.challenge_hash) if data is None: bt.logging.error(f"No data found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"retrieved data: {pformat(data)}") # Chunk the data according to the specified (random) chunk size filepath = data.get(b"filepath", None) if filepath is None: bt.logging.warning( f"No file found for {synapse.challenge_hash} in index, trying path construction..." ) # fallback to load the data from the filesystem via database path construction filepath = os.path.expanduser( f"{self.config.database.directory}/{synapse.challenge_hash}" ) if not os.path.isfile(filepath): bt.logging.error( f"No file found for {synapse.challenge_hash} in {self.config.database.directory}." 
) return synapse bt.logging.trace(f"entering load_from_filesystem()") try: encrypted_data_bytes = load_from_filesystem(filepath) except Exception as e: bt.logging.error(f"Error loading file {filepath}: {e}") synapse.axon.status_code = 404 synapse.axon.status_message = "File not found" return synapse # Construct the next commitment hash using previous commitment and hash # of the data to prove storage over time prev_seed = data.get(b"seed", "").encode() if prev_seed is None: bt.logging.error(f"No seed found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"entering compute_subsequent_commitment()...") new_seed = synapse.seed.encode() next_commitment, proof = compute_subsequent_commitment( encrypted_data_bytes, prev_seed, new_seed, verbose=self.config.miner.verbose ) if self.config.miner.verbose: bt.logging.debug(f"prev seed : {prev_seed}") bt.logging.debug(f"new seed : {new_seed}") bt.logging.debug(f"proof : {proof}") bt.logging.debug(f"commitment: {next_commitment}\n") synapse.commitment_hash = next_commitment synapse.commitment_proof = proof # update the commitment seed challenge hash in storage bt.logging.trace(f"updating challenge miner storage: {pformat(data)}") await update_seed_info( self.database, synapse.challenge_hash, synapse.dendrite.hotkey, new_seed.decode("utf-8"), ) # Chunk the data according to the provided chunk_size bt.logging.trace(f"entering chunk_data()") data_chunks = chunk_data(encrypted_data_bytes, synapse.chunk_size) # Extract setup params g = hex_to_ecc_point(synapse.g, synapse.curve) h = hex_to_ecc_point(synapse.h, synapse.curve) # Commit the data chunks based on the provided curve points bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment(g, h) bt.logging.trace(f"entering commit_data_with_seed()")
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach handlers that determine which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port or external ip have changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Starting the axon makes the miner active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment( hex_to_ecc_point(synapse.g, synapse.curve), hex_to_ecc_point(synapse.h, synapse.curve), ) bt.logging.trace(f"entering commit()") c, m_val, r = committer.commit(encrypted_byte_data + str(synapse.seed).encode()) if self.config.miner.verbose: bt.logging.debug(f"committer: {committer}") bt.logging.debug(f"encrypted_byte_data: {encrypted_byte_data}") bt.logging.debug(f"c: {c}") bt.logging.debug(f"m_val: {m_val}") bt.logging.debug(f"r: {r}") # Send back some proof that we stored the data synapse.randomness = r synapse.commitment = ecc_point_to_hex(c) bt.logging.trace(f"signed commitment: {synapse.commitment}") # Initialize the commitment hash with the initial commitment for chained proofs synapse.commitment_hash = str(m_val) bt.logging.trace(f"initial commitment_hash: {synapse.commitment_hash}") if self.config.miner.verbose: bt.logging.debug(f"signed m_val: {synapse.signature.hex()}") bt.logging.debug(f"type(seed): {type(synapse.seed)}") bt.logging.debug(f"initial commitment_hash: {synapse.commitment_hash}") bt.logging.info( f"stored data hash {data_hash} with commitment: {synapse.commitment}" ) # Don't send data back, no need. synapse.encrypted_data = base64.b64encode(b"").decode() # Empty b64 response return synapse async def challenge( self, synapse: storage.protocol.Challenge ) -> storage.protocol.Challenge: """ Handles a data challenge by providing cryptographic proof of data possession. This method retrieves the specified data from storage, calculates its commitment using elliptic curve cryptography, and constructs a Merkle proof. The response includes the requested data chunk, Merkle proof, root, and the commitment, which collectively serve as verifiable evidence of data possession. 
Args: synapse (storage.protocol.Challenge): An object representing the challenge request, which includes parameters such as the hash of the data to retrieve, chunk size, challenge index, and elliptic curve parameters for commitment calculation. Returns: storage.protocol.Challenge: The synapse object is updated with the response to the challenge, including the encrypted data chunk, commitment point, Merkle proof, and root hash. The method performs the following steps: 1. Fetches the encrypted data from storage using the hash provided in the challenge. 2. Splits the data into chunks based on the specified chunk size. 3. Computes a new commitment hash to provide a time-bound proof of possession. 4. Generates a Merkle tree from the committed data chunks and extracts a proof for the requested chunk. 5. Encodes the requested chunk and Merkle proof in base64 for transmission. 6. Updates the challenge synapse with the commitment, data chunk, randomness, and Merkle proof. 7. Records the updated commitment hash in storage for future challenges. This method ensures data integrity and allows the verification of data possession without disclosing the entire data set. It is designed to fulfill data verification requests in a secure and verifiable manner. Example usage: Assuming an initialized 'synapse' object with the challenge parameters: >>> updated_synapse = self.challenge(synapse) """ # Retrieve the data itself from miner storage bt.logging.info(f"received challenge hash: {synapse.challenge_hash}") self.request_count += 1 bt.logging.trace(f"entering get_chunk_metadata()") data = await get_chunk_metadata(self.database, synapse.challenge_hash) if data is None: bt.logging.error(f"No data found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"retrieved data: {pformat(data)}") # Chunk the data according to the specified (random) chunk size filepath = data.get(b"filepath", None) if filepath is None: bt.logging.warning( f"No file found for {synapse.challenge_hash} in index, trying path construction..." ) # fallback to load the data from the filesystem via database path construction filepath = os.path.expanduser( f"{self.config.database.directory}/{synapse.challenge_hash}" ) if not os.path.isfile(filepath): bt.logging.error( f"No file found for {synapse.challenge_hash} in {self.config.database.directory}." 
) return synapse bt.logging.trace(f"entering load_from_filesystem()") try: encrypted_data_bytes = load_from_filesystem(filepath) except Exception as e: bt.logging.error(f"Error loading file {filepath}: {e}") synapse.axon.status_code = 404 synapse.axon.status_message = "File not found" return synapse # Construct the next commitment hash using previous commitment and hash # of the data to prove storage over time prev_seed = data.get(b"seed", "").encode() if prev_seed is None: bt.logging.error(f"No seed found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"entering compute_subsequent_commitment()...") new_seed = synapse.seed.encode() next_commitment, proof = compute_subsequent_commitment( encrypted_data_bytes, prev_seed, new_seed, verbose=self.config.miner.verbose ) if self.config.miner.verbose: bt.logging.debug(f"prev seed : {prev_seed}") bt.logging.debug(f"new seed : {new_seed}") bt.logging.debug(f"proof : {proof}") bt.logging.debug(f"commitment: {next_commitment}\n") synapse.commitment_hash = next_commitment synapse.commitment_proof = proof # update the commitment seed challenge hash in storage bt.logging.trace(f"updating challenge miner storage: {pformat(data)}") await update_seed_info( self.database, synapse.challenge_hash, synapse.dendrite.hotkey, new_seed.decode("utf-8"), ) # Chunk the data according to the provided chunk_size bt.logging.trace(f"entering chunk_data()") data_chunks = chunk_data(encrypted_data_bytes, synapse.chunk_size) # Extract setup params g = hex_to_ecc_point(synapse.g, synapse.curve) h = hex_to_ecc_point(synapse.h, synapse.curve) # Commit the data chunks based on the provided curve points bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment(g, h) bt.logging.trace(f"entering commit_data_with_seed()")
randomness, chunks, commitments, merkle_tree = commit_data_with_seed(
15
2023-10-26 18:54:47+00:00
24k
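The store/challenge flow in the record above hinges on a Pedersen-style commitment c = m*G + r*H over an elliptic curve. The mechanics are easier to see in the multiplicative-group analogue c = g^m * h^r mod p sketched below; the prime, generators, and function names are toy assumptions, not the repo's actual ECCommitment implementation.

import random

p = 2**127 - 1        # a Mersenne prime, used here as a toy group modulus
g, h = 5, 7           # toy generators; real setups pick h so that log_g(h) is unknown

def commit(m: int, r: int) -> int:
    # Pedersen-style commitment: hiding via the random r, binding via the discrete log.
    return (pow(g, m, p) * pow(h, r, p)) % p

m = int.from_bytes(b"encrypted chunk", "big")  # integer encoding of the stored bytes
r = random.randrange(1, p - 1)                 # blinding factor, cf. synapse.randomness
c = commit(m, r)

# The verifier recomputes the commitment from (m, r); any change to m breaks it.
assert c == commit(m, r) and c != commit(m + 1, r)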
cpacker/MemGPT
memgpt/main.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "CLIInterface", "path": "memgpt/interface.py", "snippet": "class CLIInterface(AgentInterface):\r\n \"\"\"Basic interface for dumping agent events to the command-line\"\"\"\r\n\r\n @staticmethod\r\n ...
import shutil import configparser import uuid import logging import glob import os import sys import pickle import traceback import json import questionary import typer import memgpt.agent as agent import memgpt.system as system import memgpt.constants as constants import memgpt.errors as errors from rich.console import Console from prettytable import PrettyTable from memgpt.log import logger from memgpt.interface import CLIInterface as interface # for printing to terminal from memgpt.config import MemGPTConfig from memgpt.cli.cli import run, attach, version, server, open_folder, quickstart, migrate from memgpt.cli.cli_config import configure, list, add, delete from memgpt.cli.cli_load import app as load_app from memgpt.agent_store.storage import StorageConnector, TableType from memgpt.metadata import MetadataStore, save_agent
17,433
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version) app.command(name="attach")(attach) app.command(name="configure")(configure) app.command(name="list")(list) app.command(name="add")(add) app.command(name="delete")(delete)
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version) app.command(name="attach")(attach) app.command(name="configure")(configure) app.command(name="list")(list) app.command(name="add")(add) app.command(name="delete")(delete)
app.command(name="server")(server)
6
2023-10-11 07:38:37+00:00
24k
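memgpt/main.py builds its CLI by registering imported functions on a single typer.Typer app rather than decorating them in place. Below is a minimal standalone version of the same wiring pattern; the command bodies are illustrative placeholders, not MemGPT's actual commands.

import typer

app = typer.Typer(pretty_exceptions_enable=False)

def version() -> None:
    """Print the version string (placeholder)."""
    typer.echo("0.1.0")

def run(agent: str = typer.Option("default", help="Agent to load")) -> None:
    """Start an interactive session (placeholder)."""
    typer.echo(f"running agent: {agent}")

# Same registration style as main.py: attach plain functions as named subcommands.
app.command(name="version")(version)
app.command(name="run")(run)

if __name__ == "__main__":
    app()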
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_s...
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from diffusers.models import AutoencoderKL from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from copy import deepcopy from tqdm import tqdm from diffusion import IDDPM from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.lcm_scheduler import LCMScheduler from torchvision.utils import save_image from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,104
            data_time_all += time.time() - data_time_start
            if load_vae_feat:
                z = batch[0]
            else:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'):
                        posterior = vae.encode(batch[0]).latent_dist
                        if config.sample_posterior:
                            z = posterior.sample()
                        else:
                            z = posterior.mode()
            latents = z * config.scale_factor

            y = batch[1]
            y_mask = batch[2]
            data_info = batch[3]

            # Sample a random timestep for each image
            grad_norm = None
            with accelerator.accumulate(model):
                # Predict the noise residual
                optimizer.zero_grad()
                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias.
                topk = config.train_sampling_steps // config.num_ddim_timesteps
                index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long()
                start_timesteps = solver.ddim_timesteps[index]
                timesteps = start_timesteps - topk
                timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps)

                # Get boundary scalings for start_timesteps and (end) timesteps.
                c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps)
                c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]]
                c_skip, c_out = scalings_for_boundary_conditions(timesteps)
                c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]]

                # Sample a random guidance scale w from U[w_min, w_max] and embed it
                # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min
                w = config.cfg_scale * torch.ones((bsz,))
                w = w.reshape(bsz, 1, 1, 1)
                w = w.to(device=latents.device, dtype=latents.dtype)

                # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k}
                _, pred_x_0, noisy_model_input = train_diffusion.training_losses(
                    model, latents, start_timesteps,
                    model_kwargs=dict(y=y, mask=y_mask, data_info=data_info),
                    noise=noise)
                model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0

                # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after
                # noisy_latents with both the conditioning embedding c and unconditional embedding 0
                # Get teacher model prediction on noisy_latents and conditional embedding
                with torch.no_grad():
                    with torch.autocast("cuda"):
                        cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(
                            model_teacher, latents, start_timesteps,
                            model_kwargs=dict(y=y, mask=y_mask, data_info=data_info),
                            noise=noise)
                        # Get teacher model prediction on noisy_latents and unconditional embedding
                        uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(
                            model_teacher, latents, start_timesteps,
                            model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info),
                            noise=noise)

                # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation)
                pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0)
                pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output)
                x_prev = solver.ddim_step(pred_x0, pred_noise, index)

                # Get target LCM prediction on x_prev, w, c, t_n
                with torch.no_grad():
                    with torch.autocast("cuda", enabled=True):
                        _, pred_x_0, _ = train_diffusion.training_losses(
                            model_ema, x_prev.float(), timesteps,
                            model_kwargs=dict(y=y, mask=y_mask, data_info=data_info),
                            skip_noise=True)
                    target = c_skip * x_prev + c_out * pred_x_0

                # Calculate loss
                if config.loss_type == "l2":
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
                elif config.loss_type == "huber":
                    loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c)

                # Backpropagation on the online student model (`model`)
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=True)

            if accelerator.sync_gradients:
                ema_update(model_ema, model, config.ema_decay)

            lr = lr_scheduler.get_last_lr()[0]
            logs = {"loss": accelerator.gather(loss).mean().item()}
            if grad_norm is not None:
                logs.update(grad_norm=accelerator.gather(grad_norm).mean().item())
            log_buffer.update(logs)
            if (step + 1) % config.log_interval == 0 or (step + 1) == 1:
                t = (time.time() - last_tic) / config.log_interval
                t_d = data_time_all / config.log_interval
                avg_time = (time.time() - time_start) / (global_step + 1)
                eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1))))
                eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1))))
                # avg_loss = sum(loss_buffer) / len(loss_buffer)
                log_buffer.average()
                info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \
                       f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), "
                info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()])
                logger.info(info)
                last_tic = time.time()
                log_buffer.clear()
                data_time_all = 0
            logs.update(lr=lr)
            accelerator.log(logs, step=global_step + start_step)

            global_step += 1
            data_time_start = time.time()

            synchronize()
            torch.cuda.empty_cache()
            if accelerator.is_main_process:
                # log_validation(model_ema, step, model.device)
                if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0:
                    os.umask(0o000)
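A note on the loss branch above: the "huber" case is the pseudo-Huber distance sqrt(d**2 + c**2) - c rather than torch.nn.HuberLoss. Below is a minimal self-contained sketch of the same computation; the tensor shapes and the huber_c default are illustrative, not values taken from this row's config.

import torch
import torch.nn.functional as F

def consistency_loss(model_pred: torch.Tensor, target: torch.Tensor,
                     loss_type: str = "huber", huber_c: float = 0.001) -> torch.Tensor:
    # "l2": plain mean-squared error between student prediction and target.
    if loss_type == "l2":
        return F.mse_loss(model_pred.float(), target.float(), reduction="mean")
    # "huber": pseudo-Huber penalty sqrt(d**2 + c**2) - c, which behaves like
    # 0.5 * d**2 / c near zero and like |d| for large residuals.
    if loss_type == "huber":
        diff = model_pred.float() - target.float()
        return torch.mean(torch.sqrt(diff ** 2 + huber_c ** 2) - huber_c)
    raise ValueError(f"unknown loss_type: {loss_type}")

pred = torch.randn(2, 4, 8, 8)
tgt = torch.randn(2, 4, 8, 8)
print(consistency_loss(pred, tgt, "l2"), consistency_loss(pred, tgt, "huber"))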
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def ema_update(model_dest: nn.Module, model_src: nn.Module, rate): param_dict_src = dict(model_src.named_parameters()) for p_name, p_dest in model_dest.named_parameters(): p_src = param_dict_src[p_name] assert p_src is not p_dest p_dest.data.mul_(rate).add_((1 - rate) * p_src.data) def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def log_validation(model, step, device): if hasattr(model, 'module'): model = model.module scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon") scheduler.set_timesteps(4, 50) infer_timesteps = scheduler.timesteps dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu') caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device) hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1) ar = torch.tensor([[1.]], device=device).repeat(1, 1) # Create sampling noise: infer_latents = torch.randn(1, 4, 1024, 1024, device=device) model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks) logger.info("Running validation... ") # 7. 
LCM MultiStep Sampling Loop: for i, t in tqdm(list(enumerate(infer_timesteps))): ts = torch.full((1,), t, device=device, dtype=torch.long) # model prediction (v-prediction, eps, x) model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4] # compute the previous noisy sample x_t -> x_t-1 infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False) samples = vae.decode(denoised / 0.18215).sample torch.cuda.empty_cache() save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1)) def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = z * config.scale_factor y = batch[1] y_mask = batch[2] data_info = batch[3] # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch 
[{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000)
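DDIMSolver.ddim_step in the all_code above is the deterministic (eta = 0) DDIM update. A standalone sketch of the same step follows, with the buffer gather (extract_into_tensor) replaced by a scalar alpha_cumprod_prev for clarity; the numbers are illustrative.

import torch

def ddim_step(pred_x0, pred_noise, alpha_cumprod_prev):
    # Deterministic DDIM update: project the current x0/noise estimates onto
    # the previous timestep's marginal, x_prev = sqrt(a_prev)*x0 + sqrt(1-a_prev)*eps.
    dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise
    return alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt

x0 = torch.randn(1, 4, 8, 8)
eps = torch.randn(1, 4, 8, 8)
a_prev = torch.tensor(0.9)
print(ddim_step(x0, eps, a_prev).shape)  # torch.Size([1, 4, 8, 8])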
save_checkpoint(os.path.join(config.work_dir, 'checkpoints'),
1
2023-10-12 14:16:33+00:00
24k
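One detail of the PixArt LCM-distillation row above worth spelling out: scalings_for_boundary_conditions enforces the consistency-model boundary condition f(x, 0) = x, because c_skip -> 1 and c_out -> 0 at timestep 0. A small numeric check mirroring the source formula (sigma_data = 0.5 as in the row):

import torch

def scalings_for_boundary_conditions(timestep, sigma_data=0.5):
    # Same formula as in the row above: at timestep 0, c_skip == 1 and c_out == 0,
    # so the skip connection dominates and the boundary condition holds exactly.
    scaled = timestep / 0.1
    c_skip = sigma_data ** 2 / (scaled ** 2 + sigma_data ** 2)
    c_out = scaled / (scaled ** 2 + sigma_data ** 2) ** 0.5
    return c_skip, c_out

for t in (0.0, 1.0, 999.0):
    c_skip, c_out = scalings_for_boundary_conditions(torch.tensor(t))
    print(t, round(c_skip.item(), 6), round(c_out.item(), 6))
# 0.0   -> c_skip = 1.0, c_out = 0.0; large t -> c_skip ~ 0, c_out ~ 1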
NVlabs/EmerNeRF
train_emernerf.py
[ { "identifier": "metrics", "path": "datasets/metrics.py", "snippet": "def compute_valid_depth_rmse(prediction: Tensor, target: Tensor) -> float:\ndef compute_psnr(prediction: Tensor, target: Tensor) -> float:\ndef compute_ssim(\n prediction: Union[Tensor, np.ndarray], target: Union[Tensor, np.ndarray...
import argparse
import json
import logging
import os
import time
import imageio
import numpy as np
import torch
import torch.utils.data
import builders
import loss
import utils.misc as misc
import wandb
from typing import List, Optional
from omegaconf import OmegaConf
from tqdm import tqdm
from datasets import metrics
from datasets.base import SceneDataset
from radiance_fields import DensityField, RadianceField
from radiance_fields.render_utils import render_rays
from radiance_fields.video_utils import render_pixels, save_videos
from third_party.nerfacc_prop_net import PropNetEstimator, get_proposal_requires_grad_fn
from utils.logging import MetricLogger, setup_logging
from utils.visualization_tools import visualize_voxels, visualize_scene_flow
from datasets.waymo import WaymoDataset
from datasets.nuscenes import NuScenesDataset
19,783
help="Render a data video", ) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None,
logger = logging.getLogger() current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) # a global list of keys to render, # comment out the keys you don't want to render or uncomment the keys you want to render render_keys = [ "gt_rgbs", "rgbs", "depths", # "median_depths", "gt_dino_feats", "dino_feats", "dynamic_rgbs", "dynamic_depths", "static_rgbs", "static_depths", "forward_flows", "backward_flows", "dynamic_rgb_on_static_dinos", "dino_pe", "dino_feats_pe_free", # "dynamic_dino_on_static_rgbs", # "shadow_reduced_static_rgbs", # "shadow_only_static_rgbs", # "shadows", # "gt_sky_masks", # "sky_masks", ] def get_args_parser(): parser = argparse.ArgumentParser("Train EmernNerf for a single scene") parser.add_argument("--config_file", help="path to config file", type=str) parser.add_argument( "--eval_only", action="store_true", help="perform evaluation only" ) parser.add_argument( "--visualize_voxel", action="store_true", help="perform evaluation only" ) parser.add_argument( "--render_data_video", action="store_true", help="Render a data video", ) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") 
saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None,
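setup() above retries wandb.init in an unbounded while-loop because initialization can fail transiently on cloud machines. Below is a bounded variant of the same pattern, shown only as a sketch; max_tries and the backoff schedule are illustrative choices, not part of the source.

import time
import wandb

def init_wandb_with_retries(project: str, entity: str, max_tries: int = 5):
    # Bounded retry around wandb.init; the source compares the return value
    # against wandb.run to decide whether initialization actually succeeded.
    for attempt in range(max_tries):
        run = wandb.init(project=project, entity=entity,
                         settings=wandb.Settings(start_method="fork"))
        if run is not None and run is wandb.run:
            return run
        time.sleep(2 ** attempt)  # exponential backoff between attempts
    raise RuntimeError(f"wandb.init failed after {max_tries} attempts")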
proposal_networks: Optional[List[DensityField]] = None,
2
2023-10-11 20:56:27+00:00
24k
alibaba-damo-academy/FunCodec
funcodec/models/encoder/transformer_encoder.py
[ { "identifier": "AbsEncoder", "path": "funcodec/models/encoder/abs_encoder.py", "snippet": "class AbsEncoder(torch.nn.Module, ABC):\n @abstractmethod\n def output_size(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def forward(\n self,\n xs_pad: torch.Te...
from typing import List
from typing import Optional
from typing import Tuple
from torch import nn
from funcodec.models.encoder.abs_encoder import AbsEncoder
from funcodec.modules.attention import (
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,  # noqa: H301
    LegacyRelPositionMultiHeadedAttention,  # noqa: H301
)
from funcodec.modules.layer_norm import LayerNorm
from funcodec.modules.multi_layer_conv import Conv1dLinear
from funcodec.modules.multi_layer_conv import MultiLayeredConv1d
from funcodec.modules.nets_utils import make_pad_mask
from funcodec.modules.embedding import (
    PositionalEncoding,  # noqa: H301
    ScaledPositionalEncoding,  # noqa: H301
    RelPositionalEncoding,  # noqa: H301
    LegacyRelPositionalEncoding,  # noqa: H301
)
from funcodec.modules.positionwise_feed_forward import (
    PositionwiseFeedForward,  # noqa: H301
)
from funcodec.modules.repeat import repeat
from funcodec.modules.nets_utils import rename_state_dict
from funcodec.modules.dynamic_conv import DynamicConvolution
from funcodec.modules.dynamic_conv2d import DynamicConvolution2D
from funcodec.modules.lightconv import LightweightConvolution
from funcodec.modules.lightconv2d import LightweightConvolution2D
from funcodec.modules.subsampling import Conv2dSubsampling
from funcodec.modules.subsampling import Conv2dSubsampling2
from funcodec.modules.subsampling import Conv2dSubsampling6
from funcodec.modules.subsampling import Conv2dSubsampling8
from funcodec.modules.subsampling import TooShortUttError
from funcodec.modules.subsampling import check_short_utt
import torch
import logging
19,166
selfattention_layer_type == "lightconv*" or "dynamiconv*". linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. pos_enc_class (torch.nn.Module): Positional encoding module class. `PositionalEncoding `or `ScaledPositionalEncoding` normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. selfattention_layer_type (str): Encoder attention layer type. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) """ def __init__( self, idim, attention_dim=256, attention_heads=4, conv_wshare=4, conv_kernel_length="11", conv_usebias=False, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type="linear", positionwise_conv_kernel_size=1, selfattention_layer_type="selfattn", padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, zero_triu: bool = False, ): """Construct an Encoder object.""" super(TransformerEncoder_s0, self).__init__() self._register_load_state_dict_pre_hook(_pre_hook) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(idim, attention_dim, dropout_rate) self.conv_subsampling_factor = 4 elif input_layer == "conv2d-scaled-pos-enc": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(idim, attention_dim, dropout_rate) self.conv_subsampling_factor = 6 elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(idim, attention_dim, dropout_rate) self.conv_subsampling_factor = 8 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) elif input_layer == "none": self.embed = torch.nn.Identity() else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = 
normalize_before positionwise_layer, positionwise_layer_args = self.get_positionwise_layer( positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size, ) if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = [( attention_heads, attention_dim, attention_dropout_rate, )] * num_blocks elif selfattention_layer_type == "legacy_rel_selfattn": logging.info("encoder self-attention layer type = legacy relative self-attention") assert pos_enc_class == LegacyRelPositionalEncoding
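TransformerEncoder_s0 above defers construction of its feed-forward blocks through a (class, args) pair returned by get_positionwise_layer, whose body is not shown in this row. A standalone sketch of that factory pattern follows; FeedForward here is a stand-in, not the FunCodec PositionwiseFeedForward:

import torch
from torch import nn

class FeedForward(nn.Module):
    # Simplified position-wise feed-forward block used only for the sketch.
    def __init__(self, size, hidden, dropout):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(size, hidden), nn.ReLU(),
                                 nn.Dropout(dropout), nn.Linear(hidden, size))

    def forward(self, x):
        return self.net(x)

def get_positionwise_layer(layer_type, size, hidden, dropout):
    # Return the class plus its constructor args, so each encoder block can
    # instantiate its own copy later via layer_cls(*layer_args).
    if layer_type == "linear":
        return FeedForward, (size, hidden, dropout)
    raise NotImplementedError("Support only linear in this sketch.")

layer_cls, layer_args = get_positionwise_layer("linear", 256, 2048, 0.1)
ff = layer_cls(*layer_args)  # deferred construction, one instance per block
print(ff(torch.randn(2, 10, 256)).shape)  # torch.Size([2, 10, 256])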
# Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Transformer encoder definition.""" class EncoderLayer(nn.Module): """Encoder layer module. Args: size (int): Input dimension. self_attn (torch.nn.Module): Self-attention module instance. `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance can be used as the argument. feed_forward (torch.nn.Module): Feed-forward module instance. `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance can be used as the argument. dropout_rate (float): Dropout rate. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) stochastic_depth_rate (float): Proability to skip this layer. During training, the layer may skip residual computation and return input as-is with given probability. """ def __init__( self, size, self_attn, feed_forward, dropout_rate, normalize_before=True, concat_after=False, stochastic_depth_rate=0.0, ): """Construct an EncoderLayer object.""" super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.norm1 = LayerNorm(size) self.norm2 = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) self.stochastic_depth_rate = stochastic_depth_rate def forward(self, x, mask, cache=None): """Compute encoded features. Args: x_input (torch.Tensor): Input tensor (#batch, time, size). mask (torch.Tensor): Mask tensor for the input (#batch, time). cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). Returns: torch.Tensor: Output tensor (#batch, time, size). torch.Tensor: Mask tensor (#batch, time). """ if isinstance(x, tuple): x, pos_emb = x[0], x[1] else: x, pos_emb = x, None skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. 
Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate) elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate) elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer is None: if input_size == output_size: self.embed = None else: self.embed = torch.nn.Linear(input_size, output_size) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( output_size, MultiHeadedAttention( attention_heads, output_size, attention_dropout_rate ), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after, ), ) if self.normalize_before: self.after_norm = 
LayerNorm(output_size) self.interctc_layer_idx = interctc_layer_idx if len(interctc_layer_idx) > 0: assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks self.interctc_use_conditioning = interctc_use_conditioning self.conditioning_layer = None def output_size(self) -> int: return self._output_size def forward( self, xs_pad: torch.Tensor, ilens: torch.Tensor, prev_states: torch.Tensor = None, ctc = None, ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: """Embed positions in tensor. Args: xs_pad: input tensor (B, L, D) ilens: input length (B) prev_states: Not to be used now. Returns: position embedded tensor and mask """ masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device) if self.causal_mode == "None": pass elif self.causal_mode == "causal": tt = xs_pad.shape[1] pos_idx = torch.arange(tt) causal_mask = torch.less_equal(pos_idx.unsqueeze(0), pos_idx.unsqueeze(1)) causal_mask = causal_mask.unsqueeze(0).to(xs_pad.device) masks = masks * causal_mask if self.embed is None: xs_pad = xs_pad elif ( isinstance(self.embed, Conv2dSubsampling) or isinstance(self.embed, Conv2dSubsampling2) or isinstance(self.embed, Conv2dSubsampling6) or isinstance(self.embed, Conv2dSubsampling8) ): short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1)) if short_status: raise TooShortUttError( f"has {xs_pad.size(1)} frames and is too short for subsampling " + f"(it needs more than {limit_size} frames), return empty results", xs_pad.size(1), limit_size, ) xs_pad, masks = self.embed(xs_pad, masks) else: xs_pad = self.embed(xs_pad) intermediate_outs = [] if len(self.interctc_layer_idx) == 0: xs_pad, masks = self.encoders(xs_pad, masks) else: for layer_idx, encoder_layer in enumerate(self.encoders): xs_pad, masks = encoder_layer(xs_pad, masks) if layer_idx + 1 in self.interctc_layer_idx: encoder_out = xs_pad # intermediate outputs are also normalized if self.normalize_before: encoder_out = self.after_norm(encoder_out) intermediate_outs.append((layer_idx + 1, encoder_out)) if self.interctc_use_conditioning: ctc_out = ctc.softmax(encoder_out) xs_pad = xs_pad + self.conditioning_layer(ctc_out) if self.normalize_before: xs_pad = self.after_norm(xs_pad) olens = masks.squeeze(1).sum(1) if len(intermediate_outs) > 0: return (xs_pad, intermediate_outs), olens, None return xs_pad, olens, None def _pre_hook( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): # https://github.com/espnet/espnet/commit/21d70286c354c66c0350e65dc098d2ee236faccc#diff-bffb1396f038b317b2b64dd96e6d3563 rename_state_dict(prefix + "input_layer.", prefix + "embed.", state_dict) # https://github.com/espnet/espnet/commit/3d422f6de8d4f03673b89e1caef698745ec749ea#diff-bffb1396f038b317b2b64dd96e6d3563 rename_state_dict(prefix + "norm.", prefix + "after_norm.", state_dict) class TransformerEncoder_s0(torch.nn.Module): """Transformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. conv_wshare (int): The number of kernel of convolution. Only used in selfattention_layer_type == "lightconv*" or "dynamiconv*". conv_kernel_length (Union[int, str]): Kernel size str of convolution (e.g. 71_71_71_71_71_71). Only used in selfattention_layer_type == "lightconv*" or "dynamiconv*". conv_usebias (bool): Whether to use bias in convolution. Only used in selfattention_layer_type == "lightconv*" or "dynamiconv*". 
linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. pos_enc_class (torch.nn.Module): Positional encoding module class. `PositionalEncoding `or `ScaledPositionalEncoding` normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. selfattention_layer_type (str): Encoder attention layer type. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) """ def __init__( self, idim, attention_dim=256, attention_heads=4, conv_wshare=4, conv_kernel_length="11", conv_usebias=False, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type="linear", positionwise_conv_kernel_size=1, selfattention_layer_type="selfattn", padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, zero_triu: bool = False, ): """Construct an Encoder object.""" super(TransformerEncoder_s0, self).__init__() self._register_load_state_dict_pre_hook(_pre_hook) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(idim, attention_dim, dropout_rate) self.conv_subsampling_factor = 4 elif input_layer == "conv2d-scaled-pos-enc": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(idim, attention_dim, dropout_rate) self.conv_subsampling_factor = 6 elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(idim, attention_dim, dropout_rate) self.conv_subsampling_factor = 8 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) elif input_layer == "none": self.embed = torch.nn.Identity() else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before positionwise_layer, positionwise_layer_args = 
self.get_positionwise_layer( positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size, ) if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = [( attention_heads, attention_dim, attention_dropout_rate, )] * num_blocks elif selfattention_layer_type == "legacy_rel_selfattn": logging.info("encoder self-attention layer type = legacy relative self-attention") assert pos_enc_class == LegacyRelPositionalEncoding
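EncoderLayer.forward in the all_code above implements stochastic depth: during training the whole sublayer is skipped with probability p, and surviving passes scale the residual branch by 1/(1 - p) so the expected update is unchanged. A minimal sketch of that rule in isolation:

import torch
from torch import nn

class StochasticDepthResidual(nn.Module):
    # With probability p the sublayer is skipped entirely; otherwise its output
    # is scaled by 1 / (1 - p), matching the rule `x <- x + 1/(1-p) * f(x)`.
    def __init__(self, sublayer: nn.Module, p: float):
        super().__init__()
        self.sublayer = sublayer
        self.p = p

    def forward(self, x):
        if self.training and self.p > 0:
            if torch.rand(1).item() < self.p:
                return x  # layer skipped this step
            return x + (1.0 / (1 - self.p)) * self.sublayer(x)
        return x + self.sublayer(x)  # no skipping or rescaling at eval time

block = StochasticDepthResidual(nn.Linear(8, 8), p=0.3)
block.train()
print(block(torch.randn(2, 8)).shape)  # torch.Size([2, 8])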
encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
3
2023-10-07 02:00:40+00:00
24k
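From the FunCodec row above: when causal_mode == "causal", TransformerEncoder.forward builds its causal mask by broadcasting a position index against itself and then multiplies the result into the padding mask. The same construction on a toy sequence length:

import torch

# result[i][j] is True iff j <= i, i.e. position i may only attend backwards.
tt = 5
pos_idx = torch.arange(tt)
causal_mask = torch.less_equal(pos_idx.unsqueeze(0), pos_idx.unsqueeze(1))
print(causal_mask.int())  # lower-triangular matrix of ones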
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2_DDP.py
[ { "identifier": "nnUNetTrainerV2", "path": "nn_transunet/trainer/nnUNetTrainerV2.py", "snippet": "class nnUNetTrainerV2(nnUNetTrainer):\n \"\"\"\n Info for Fabian: same as internal nnUNetTrainerV2_2\n \"\"\"\n\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None,...
from genericpath import exists
from _warnings import warn
from collections import OrderedDict
from multiprocessing import Pool
from time import sleep, time
from typing import Tuple
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.postprocessing.connected_components import determine_postprocessing
from nnunet.utilities.distributed import awesome_allgather_function
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from nnunet.utilities.to_torch import to_cuda, maybe_to_torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn
from torch import nn, distributed
from torch.backends import cudnn
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim.lr_scheduler import _LRScheduler
from tqdm import trange
from ..trainer.nnUNetTrainerV2 import nnUNetTrainerV2, InitWeights_He
from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join, subfiles, isfile, load_pickle, \
    save_json
from ..data.data_augmentation_moreDA import get_moreDA_augmentation
from ..data.dataset_loading import unpack_dataset
from ..data.default_data_augmentation import default_2D_augmentation_params, get_patch_size, default_3D_augmentation_params
from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp
from nnunet.training.data_augmentation.data_augmentation_insaneDA2 import get_insaneDA_augmentation2
from ..optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from torch.optim import lr_scheduler
from network_trainer import warmup_poly_lr
from network_trainer import poly_lr
from ..networks.transunet3d_model import HungarianMatcher3D, compute_loss_hungarian
from ..utils.dist_utils import check_call_hdfs_command, mkdir_hdfs
import os
import shutil
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
17,954
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    if self.local_rank == 0:
                        print("unpacking dataset")
                        unpack_dataset(self.folder_with_preprocessed_data)
                        print("done")
                    distributed.barrier()
                else:
                    # distributed.barrier()
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")

                # setting weights for deep supervision losses
                if not self.model.startswith("Generic") and self.args.fix_ds_net_numpool:
                    # here is a bug, which needs to be fixed!
                    net_numpool = len(self.deep_supervision_scales)
                else:
                    net_numpool = len(self.net_num_pool_op_kernel_sizes)
                # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
                # this gives higher resolution outputs more weight in the loss
                weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
                # we don't use the lowest-resolution output. Normalize weights so that they sum to 1
                mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
                weights[~mask] = 0
                weights = weights / weights.sum()
                self.ds_loss_weights = weights
                if self.disable_ds:
                    self.ds_loss_weights[0] = 1
                    self.ds_loss_weights[1:] = 0

                seeds_train = np.random.random_integers(0, 99999, self.data_aug_params.get('num_threads'))
                seeds_val = np.random.random_integers(0, 99999, max(self.data_aug_params.get('num_threads') // 2, 1))
                print("seeds train", seeds_train)
                print("seeds_val", seeds_val)

                # add more transforms into the dataloader
                if self.reclip:
                    lb, ub, means, stds = self.reclip[0], self.reclip[1], self.intensity_properties[0]['mean'], self.intensity_properties[0]['sd']
                    self.reclip = [lb, ub, means, stds]

                if self.args.config.find('500Region') != -1:  # BraTSRegions_moreDA
                    self.tr_gen, self.val_gen = get_insaneDA_augmentation2(
                        self.dl_tr, self.dl_val,
                        self.data_aug_params['patch_size_for_spatialtransform'],
                        self.data_aug_params,
                        deep_supervision_scales=self.deep_supervision_scales,
                        pin_memory=self.pin_memory,
                        regions=self.regions
                    )  # such that we can get val
                else:
                    self.tr_gen, self.val_gen = get_moreDA_augmentation(
                        self.dl_tr, self.dl_val,
                        self.data_aug_params['patch_size_for_spatialtransform'],
                        self.data_aug_params,
                        deep_supervision_scales=self.deep_supervision_scales,
                        seeds_train=seeds_train,
                        seeds_val=seeds_val,
                        pin_memory=self.pin_memory,
                        is_spatial_aug_only=self.is_spatial_aug_only,
                        reclip=self.reclip
                    )
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)  # in network_trainer.py, tr_keys = val_keys = list(self.dataset.keys()) if fold == 'all'
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass

            self.initialize_network()
            self.initialize_optimizer_and_scheduler()

            self.network = DDP(self.network, device_ids=[self.local_rank], find_unused_parameters=True)
            if self.local_rank == 0:
                print(self.network)
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def initialize_network(self):
        """
        - momentum 0.99
        - SGD instead of Adam
        - self.lr_scheduler = None because we do poly_lr
        - deep supervision = True
        - I am sure I forgot something here

        Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
        :return:
        """
        if self.model.startswith("Generic"):
            if self.threeD:
                conv_op = nn.Conv3d
                dropout_op = nn.Dropout3d
                norm_op = nn.InstanceNorm3d
            else:
                conv_op = nn.Conv2d
                dropout_op = nn.Dropout2d
                norm_op = nn.InstanceNorm2d

            norm_op_kwargs = {'eps': 1e-5, 'affine': True}
            dropout_op_kwargs = {'p': 0, 'inplace': True}
            net_nonlin = nn.LeakyReLU  # nnunet v1, not softmax..., interesting..., but compute_loss has considered the softmax..
            net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
            do_ds = not self.disable_ds
            if not do_ds:
                print("disable ds")
            if self.model == 'Generic_TransUNet_max_ppbp':
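Worked example of the deep-supervision weighting in the cropped_code above: each output gets weight 1/2**i, only the lowest-resolution output is zeroed, and the rest are renormalized to sum to 1. net_numpool = 5 is an illustrative value; the real one comes from the plans file:

import numpy as np

net_numpool = 5
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
mask = np.array([i < net_numpool - 1 for i in range(net_numpool)])
weights[~mask] = 0            # drop the lowest-resolution head
weights = weights / weights.sum()
print(weights)  # approx [0.5333 0.2667 0.1333 0.0667 0.], highest resolution weighted most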
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #installed package class nnUNetTrainerV2_DDP(nnUNetTrainerV2): def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False, model="Generic_UNet", input_size=(64, 160, 160), args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.init_args = ( plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, distribute_batch_size, fp16) assert args is not None self.args = args if self.args.config.find('500Region') != -1: self.regions = {"whole tumor": (1, 2, 3), "tumor core": (2, 3), "enhancing tumor": (3,) # correct } if self.args.config.find('500RegionFix') != -1: self.regions = {"whole tumor": (1, 2, 3), "tumor core": (2, 3), "enhancing tumor": (2,) # fig 1: the innermost tumor, but this is a bug!! } self.regions_class_order = (1, 2, 3) self.layer_decay = args.layer_decay self.lr_scheduler_name = args.lrschedule # [ TO DO ] self.reclip = args.reclip self.warmup_epochs = args.warmup_epochs self.min_lr = args.min_lr self.is_spatial_aug_only = args.is_spatial_aug_only if "model_params" in args: self.model_params = args.model_params else: self.model_params = {} self.optim_name = args.optim_name self.find_zero_weight_decay = args.find_zero_weight_decay self.model = args.model self.resume = args.resume self.input_size=input_size self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 8 gpu training self.initial_lr = args.initial_lr # 8 * 0.01 self.weight_decay = args.weight_decay # 3e-5 in nnUNetTrainer.py self.save_every = 1 # prev 50 self.distribute_batch_size = distribute_batch_size np.random.seed(local_rank) torch.manual_seed(local_rank) if torch.cuda.is_available(): torch.cuda.manual_seed_all(local_rank) self.local_rank = local_rank if torch.cuda.is_available(): torch.cuda.set_device(local_rank) # dist.init_process_group(backend='nccl', init_method='env://') # init outside self.loss = None self.ce_loss = RobustCrossEntropyLoss() self.global_batch_size = None # we need to know this to properly steer oversample def setup_DA_params_BraTSRegions(self): # nnUNetTrainerV2.setup_DA_params(self) self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. 
* np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size self.data_aug_params["p_rot"] = 0.3 self.data_aug_params["scale_range"] = (0.65, 1.6) self.data_aug_params["p_scale"] = 0.3 self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.3 # LMH 0.2 -> 0.3 according to paper self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 0.5 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 def set_batch_size_and_oversample(self): batch_sizes = [] oversample_percents = [] world_size = self.args.world_size# dist.get_world_size() my_rank = self.args.rank # dist.get_rank() # not local_rank if self.args.total_batch_size: # actually it is global_batch_size # reset the batch_size per gpu accordingly self.batch_size = self.args.total_batch_size // world_size # if self.args.local_rank == 0: # print("total_batch_size: %d, updated batch_size per gpu %d, world_size %d" % (self.args.total_batch_size, self.batch_size, world_size)) if self.distribute_batch_size: # set total batch_size to 16 will be fine... 
self.global_batch_size = self.batch_size else: self.global_batch_size = self.batch_size * world_size batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int) # probably 1 for rank in range(world_size): if self.distribute_batch_size: if (rank + 1) * batch_size_per_GPU > self.batch_size: batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size) else: batch_size = batch_size_per_GPU else: batch_size = self.batch_size batch_sizes.append(batch_size) sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) sample_id_high = np.sum(batch_sizes) if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent): oversample_percents.append(0.0) elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent): oversample_percents.append(1.0) else: percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - sample_id_low / self.global_batch_size) / percent_covered_by_this_rank) oversample_percents.append(oversample_percent_here) print("worker", my_rank, "oversample", oversample_percents[my_rank]) print("worker", my_rank, "batch_size", batch_sizes[my_rank]) # batch_sizes [self.batch_size]*world_size self.batch_size = batch_sizes[my_rank] self.oversample_foreground_percent = oversample_percents[my_rank] def save_checkpoint(self, fname, save_optimizer=True): if self.local_rank == 0: super().save_checkpoint(fname, save_optimizer) def plot_progress(self): if self.local_rank == 0: super().plot_progress() def print_to_log_file(self, *args, also_print_to_console=True): if self.local_rank == 0: super().print_to_log_file(*args, also_print_to_console=also_print_to_console) def process_plans(self, plans): super().process_plans(plans) if (self.patch_size != self.args.crop_size).any(): self.patch_size = self.args.crop_size self.set_batch_size_and_oversample() if self.args.config.find('500Region') != -1: self.num_classes = len(self.regions) # only care about foreground (compatible with sigmoid) def initialize(self, training=True, force_load_plans=False): """ :param training: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if self.args.config.find('500Region') != -1: # BraTSRegions_moreDA self.setup_DA_params_BraTSRegions() if hasattr(self.args, 'deep_supervision_scales') and len(self.args.deep_supervision_scales)>0: self.deep_supervision_scales = self.args.deep_supervision_scales # overwrite setup_DA_params() from nnUNetTrainerV2 self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") distributed.barrier() else: # distributed.barrier() print( "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") # setting weights for deep supervision losses if not self.model.startswith("Generic") and self.args.fix_ds_net_numpool: # here is a bug, which need to be fixed! 
net_numpool = len(self.deep_supervision_scales) else: net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 seeds_train = np.random.random_integers(0, 99999, self.data_aug_params.get('num_threads')) seeds_val = np.random.random_integers(0, 99999, max(self.data_aug_params.get('num_threads') // 2, 1)) print("seeds train", seeds_train) print("seeds_val", seeds_val) # add more transform into dataloader if self.reclip: lb, ub, means, stds = self.reclip[0], self.reclip[1], self.intensity_properties[0]['mean'], self.intensity_properties[0]['sd'] self.reclip = [lb, ub, means, stds] if self.args.config.find('500Region') != -1: # BraTSRegions_moreDA self.tr_gen, self.val_gen = get_insaneDA_augmentation2( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory, regions=self.regions ) # such that we can get val else: self.tr_gen, self.val_gen = get_moreDA_augmentation( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, seeds_train=seeds_train, seeds_val=seeds_val, pin_memory=self.pin_memory, is_spatial_aug_only=self.is_spatial_aug_only, reclip=self.reclip ) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) # in network_trainer.py tr_keys = val_keys = list(self.dataset.keys()) if fold=='all' self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() self.network = DDP(self.network, device_ids=[self.local_rank], find_unused_parameters=True) if self.local_rank==0: print(self.network) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): """ - momentum 0.99 - SGD instead of Adam - self.lr_scheduler = None because we do poly_lr - deep supervision = True - i am sure I forgot something here Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though :return: """ if self.model.startswith("Generic"): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU # nnunet v1, not softmax..., interesting..., but compute_loss has consider the softmax.. net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} do_ds = not self.disable_ds if not do_ds: print("disable ds") if self.model == 'Generic_TransUNet_max_ppbp':
self.network = Generic_TransUNet_max_ppbp(self.num_input_channels, self.base_num_features, self.num_classes,
5
2023-10-11 05:19:25+00:00
24k
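The deep-supervision loss weighting in the trainer above is worth checking in isolation: each output's weight halves as resolution decreases, the lowest-resolution output is zeroed, and the rest are renormalized to sum to 1. A minimal standalone sketch of just that computation (names follow the snippet; `net_numpool` is the number of decoder outputs):

```python
import numpy as np

def ds_loss_weights(net_numpool: int, disable_ds: bool = False) -> np.ndarray:
    # Each output's weight halves as resolution decreases.
    weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
    # Zero out the lowest-resolution output, then renormalize to sum to 1.
    mask = np.array([i < net_numpool - 1 for i in range(net_numpool)])
    weights[~mask] = 0
    weights = weights / weights.sum()
    if disable_ds:
        # With deep supervision disabled, only the full-resolution head contributes.
        weights[:] = 0
        weights[0] = 1
    return weights

print(ds_loss_weights(4))  # ~[0.571, 0.286, 0.143, 0.0]
```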
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n ...
import os import torch import torch.nn as nn import numpy as np import utils.utils as utils import random import time import pandas as pd import copy import argparse from utils import genotypes as gt from models.search_cnn import SearchCNNController from models.search_cnn_PC import SearchCNNControllerPC from task_optimizer.darts import Darts,Architect from task_optimizer.darts import train as d_train from tqdm import tqdm from tqdm import tqdm
16,321
torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation") model = SearchCNNControllerPC( 3, config.init_channels, config.k, config.layers, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, use_pc_adaptation=True, alpha_prune_threshold=config.alpha_prune_threshold ) ############################################################ model = model.to(device) # weights optimizer w_optim = torch.optim.Adam(model.weights(), config.w_lr, betas=(0.0, 0.999), weight_decay=config.w_weight_decay) # alphas optimizer alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.0, 0.999), weight_decay=config.alpha_weight_decay) # split data to train/validation n_train = len(train_data) split = n_train // 2 # changed here indices = list(range(n_train)) train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) #and order of these valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=train_sampler, num_workers=config.workers, pin_memory=True) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=valid_sampler, num_workers=config.workers, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_data,batch_size=config.batch_size, shuffle=True, num_workers=config.workers, pin_memory=True) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=0.0) architect = Architect(model, config.w_momentum, config.w_weight_decay, use_first_order_darts=True) # training loop best_top1 = 0. global_progress = 0 start_time = time.process_time() warm_up_flag = False epoch_avg = pd.DataFrame() for epoch in tqdm(range(config.epochs),total=config.epochs): mem = torch.cuda.memory_stats(0)['allocated_bytes.all.peak']/(1024**2) config.epoch_score = [] lr = lr_scheduler.get_last_lr()[0] # training loader_chunk = Loader_Chunk(train_loader,valid_loader) if epoch < config.warm_up_epochs: warm_up_flag = True #a = list(self.parameters())[0].clone() # loss.backward() # self.optimizer.step() # b = list(self.parameters())[0].clone() # torch.equal(a.data, b.data)
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation") model = SearchCNNControllerPC( 3, config.init_channels, config.k, config.layers, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, use_pc_adaptation=True, alpha_prune_threshold=config.alpha_prune_threshold ) ############################################################ model = model.to(device) # weights optimizer w_optim = torch.optim.Adam(model.weights(), config.w_lr, betas=(0.0, 0.999), weight_decay=config.w_weight_decay) # alphas optimizer alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.0, 0.999), weight_decay=config.alpha_weight_decay) # split data to train/validation n_train = len(train_data) split = n_train // 2 # changed here indices = list(range(n_train)) train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) #and order of these valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=train_sampler, num_workers=config.workers, pin_memory=True) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=valid_sampler, num_workers=config.workers, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_data,batch_size=config.batch_size, shuffle=True, num_workers=config.workers, pin_memory=True) lr_scheduler = 
torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=0.0) architect = Architect(model, config.w_momentum, config.w_weight_decay, use_first_order_darts=True) # training loop best_top1 = 0. global_progress = 0 start_time = time.process_time() warm_up_flag = False epoch_avg = pd.DataFrame() for epoch in tqdm(range(config.epochs),total=config.epochs): mem = torch.cuda.memory_stats(0)['allocated_bytes.all.peak']/(1024**2) config.epoch_score = [] lr = lr_scheduler.get_last_lr()[0] # training loader_chunk = Loader_Chunk(train_loader,valid_loader) if epoch < config.warm_up_epochs: warm_up_flag = True #a = list(self.parameters())[0].clone() # loss.backward() # self.optimizer.step() # b = list(self.parameters())[0].clone() # torch.equal(a.data, b.data)
d_train(loader_chunk,model,architect,w_optim,alpha_optim,config.w_lr,global_progress,config,warm_up=warm_up_flag)
4
2023-10-08 02:42:27+00:00
24k
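The commented-out fragment at the end of the sample above (clone a parameter, step the optimizer, compare) is a standard sanity check that `optimizer.step()` actually changed the weights. A self-contained sketch of the same check on a toy module (a hypothetical stand-in, not the search network itself):

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # toy stand-in for the search model
optim = torch.optim.Adam(model.parameters(), lr=1e-3)

before = next(model.parameters()).clone()   # a = list(self.parameters())[0].clone()
loss = model(torch.randn(8, 4)).sum()
loss.backward()
optim.step()
after = next(model.parameters()).clone()    # b = list(self.parameters())[0].clone()

# torch.equal(a.data, b.data) should now be False: the step changed the weights.
assert not torch.equal(before, after)
```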
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def ...
from PIL import Image from io import BytesIO from .io_utils import IO, DefaultIO, OSS from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from mplug_owl.configuration_mplug_owl import MplugOwlConfig from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer from transformers import GenerationConfig from .model_utils import post_process_output, Stream, Iteratorize from pathlib import Path from mplug_owl.processing_mplug_owl import MplugOwlProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from pipeline.data_utils.processors.builder import build_processors from pipeline.data_utils.processors import * from transformers.models.llama.tokenization_llama import LlamaTokenizer from icecream import ic import torch import gradio as gr import logging import sys import os import json import requests import datetime import uuid import base64 import time import sys import transformers
15,224
# text_list = prompts[0].split('<image>')
# text = text_list[0]
# for ri, image in enumerate(images):
#     if args.patch_pos_embed_type == 'pre':
#         # for 'pre' processing, v2t's final output is the tokens of a single image
#         text += '<image>'
#     else:
#         # for 'post' processing, v2t's final output is multiple images
#         text += '<image>'*image.shape[0]
#     text += text_list[ri+1]
# images = torch.cat(images, dim=0)
# patch_position = torch.cat(patch_position, dim=0)
# print(text)
# ic(images.shape)
# ic(patch_position.shape)
# from mplug_owl.processing_mplug_owl import tokenize_prompts
# input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt')
# return {
#     "pixel_values": images,
#     'patch_position': patch_position,
#     "input_ids": input_ids
# }

class mPLUG_Owl_Server:
    def __init__(
        self,
        base_model='MAGAer13/mplug-owl-llama-7b',
        log_dir='./',
        load_in_8bit=False,
        bf16=True,
        device="cuda",
        io=None,
        config=None,
    ):
        self.log_dir = log_dir
        self.config = config
        self.image_processor = build_processors(config['valid_processors'])['sft']
        self.tokenizer = LlamaTokenizer.from_pretrained(base_model)
        self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer)
        self.model = MplugOwlForConditionalGeneration.from_pretrained(
            base_model,
            torch_dtype=torch.float,
        )
        ckpt = {}
        for cf in Path(base_model).iterdir():
            if 'pytorch_model' in cf.name and cf.name.endswith('.bin'):
                ckpt.update(torch.load(cf, map_location='cpu'))
        msg = self.model.load_state_dict(ckpt, strict=False)
        print(msg)
        del ckpt

        self.bf16 = bf16
        self.load_in_8bit = load_in_8bit
        if not load_in_8bit:
            if bf16:
                self.model.bfloat16()
            else:
                self.model.half()
        self.model.cuda()
        self.model.eval()
        self.io = io

    def evaluate(
        self,
        pixel_values=None,
        patch_positions=None,
        input_ids=None,
        temperature=1.0,
        top_p=0.9,
        top_k=5,
        num_beams=3,
        max_new_tokens=256,
        stream_output=True,
        length_penalty=1.0,
        no_repeat_ngram_size=2,
        do_sample=False,
        early_stopping=True,
        **kwargs
    ):
        generation_config = dict(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            no_repeat_ngram_size=no_repeat_ngram_size,
            do_sample=do_sample,
            early_stopping=early_stopping,
            length_penalty=length_penalty,
        )

        generate_params = {
            "pixel_values": pixel_values,
            "patch_positions": patch_positions,
            "input_ids": input_ids,
            "return_dict_in_generate": True,
            "output_scores": True,
            "max_new_tokens": max_new_tokens,
        }
        generate_params.update(generation_config)

        if stream_output:
            # Stream the reply 1 token at a time.
            # This is based on the trick of using 'stopping_criteria' to create an iterator,
            # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243.

            def generate_with_callback(callback=None, **kwargs):
                kwargs.setdefault(
                    "stopping_criteria", transformers.StoppingCriteriaList()
                )
                kwargs["stopping_criteria"].append(Stream(callback_func=callback))
                with torch.no_grad():
                    self.model.generate(**kwargs)

            def generate_with_streaming(**kwargs):
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model) self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer) self.model = MplugOwlForConditionalGeneration.from_pretrained( base_model, torch_dtype=torch.float, ) ckpt = {} for cf in Path(base_model).iterdir(): if 'pytorch_model' in cf.name and cf.name.endswith('.bin'): ckpt.update(torch.load(cf, map_location='cpu')) msg = self.model.load_state_dict(ckpt, strict=False) print(msg) del ckpt self.bf16 = bf16 self.load_in_8bit = load_in_8bit if not load_in_8bit: if bf16: self.model.bfloat16() else: self.model.half() self.model.cuda() self.model.eval() self.io = io def evaluate( self, pixel_values=None, patch_positions=None, input_ids=None, temperature=1.0, top_p=0.9, top_k=5, num_beams=3, max_new_tokens=256, stream_output=True, length_penalty=1.0, no_repeat_ngram_size=2, do_sample=False, early_stopping=True, **kwargs ): generation_config = dict( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=no_repeat_ngram_size, do_sample=do_sample, early_stopping=early_stopping, length_penalty=length_penalty, ) generate_params = { "pixel_values": pixel_values, "patch_positions": patch_positions, "input_ids": input_ids, "return_dict_in_generate": True, "output_scores": True, "max_new_tokens": max_new_tokens, } generate_params.update(generation_config) if stream_output: # Stream the reply 1 token at a time. 
# This is based on the trick of using 'stopping_criteria' to create an iterator, # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243. def generate_with_callback(callback=None, **kwargs): kwargs.setdefault( "stopping_criteria", transformers.StoppingCriteriaList() ) kwargs["stopping_criteria"].append(Stream(callback_func=callback)) with torch.no_grad(): self.model.generate(**kwargs) def generate_with_streaming(**kwargs):
return Iteratorize(generate_with_callback, kwargs, callback=None)
10
2023-10-08 06:29:02+00:00
24k
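The `Stream(callback_func=...)` appended to `stopping_criteria` above is the text-generation-webui trick the comment links to: a "stopping" criterion that never stops, but fires a callback on every decoding step, which `Iteratorize` then turns into a generator. A minimal sketch of such a criterion against the public `transformers` interface (the real `Stream`/`Iteratorize` helpers live in `serve/model_utils.py`):

```python
import torch
from transformers import StoppingCriteria

class Stream(StoppingCriteria):
    """Never halts generation; surfaces the growing sequence to a callback."""

    def __init__(self, callback_func=None):
        self.callback_func = callback_func

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        if self.callback_func is not None:
            self.callback_func(input_ids[0])  # tokens generated so far (batch item 0)
        return False  # returning False means "keep generating"
```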
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying ...
import os import random import torchaudio import typing as tp import numpy as np import torch import librosa import subprocess import math import allin1 import pytsmod as tsm import shutil import shutil from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf from audiocraft.modules.btc.btc_model import BTC_model from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord from demucs.audio import convert_audio from demucs.apply import apply_model
14,614
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg
compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
2
2023-10-09 09:55:24+00:00
24k
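`_delete_param` above toggles `OmegaConf.set_struct` because a struct-mode config refuses key deletion; relaxing struct mode lets stale checkpoint keys (like the `cache_path` entries) be dropped, after which the schema is frozen again. A small usage sketch of that behavior (key name taken from the snippet):

```python
from omegaconf import OmegaConf

cfg = OmegaConf.create({"conditioners": {"args": {"drop_desc_p": 0.1}}})
OmegaConf.set_struct(cfg, True)           # struct mode: schema changes now raise

OmegaConf.set_struct(cfg, False)          # temporarily relax the schema...
del cfg.conditioners.args["drop_desc_p"]  # ...so the stale key can be removed
OmegaConf.set_struct(cfg, True)           # freeze it again

print("drop_desc_p" in cfg.conditioners.args)  # False
```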
oracle/guardian-ai
tests/unitary/test_fairness_metrics.py
[ { "identifier": "ConsistencyScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class ConsistencyScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the consistency of a dataset.\n\n Consistency is measured as the number of ratio of instances that have a\n different...
import math import numpy as np import pandas as pd import pytest import sklearn from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import OneHotEncoder from guardian_ai.fairness.metrics.dataset import ( ConsistencyScorer, DatasetStatisticalParityScorer, SmoothedEDFScorer, consistency, dataset_statistical_parity, smoothed_edf, ) from guardian_ai.fairness.metrics.model import ( EqualizedOddsScorer, ErrorRateScorer, FalseDiscoveryRateScorer, FalseNegativeRateScorer, FalseOmissionRateScorer, FalsePositiveRateScorer, ModelStatisticalParityScorer, TheilIndexScorer, TruePositiveRateScorer, equalized_odds, error_rate, false_discovery_rate, false_negative_rate, false_omission_rate, false_positive_rate, model_statistical_parity, theil_index, true_positive_rate, ) from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError from tests.utils import get_dummy_dataset
18,505
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer, "false_positive_rate_scorer": FalsePositiveRateScorer, "false_negative_rate_scorer": FalseNegativeRateScorer, "false_omission_rate_scorer": FalseOmissionRateScorer, "false_discovery_rate_scorer": FalseDiscoveryRateScorer, "error_rate_scorer": ErrorRateScorer, "equalized_odds_scorer": EqualizedOddsScorer, "theil_index_scorer": TheilIndexScorer, } MODEL_SUBGROUPS_SCORERS = { "model_statistical_parity_scorer": model_statistical_parity, "true_positive_rate_scorer": true_positive_rate,
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer, "false_positive_rate_scorer": FalsePositiveRateScorer, "false_negative_rate_scorer": FalseNegativeRateScorer, "false_omission_rate_scorer": FalseOmissionRateScorer, "false_discovery_rate_scorer": FalseDiscoveryRateScorer, "error_rate_scorer": ErrorRateScorer, "equalized_odds_scorer": EqualizedOddsScorer, "theil_index_scorer": TheilIndexScorer, } MODEL_SUBGROUPS_SCORERS = { "model_statistical_parity_scorer": model_statistical_parity, "true_positive_rate_scorer": true_positive_rate,
"false_positive_rate_scorer": false_positive_rate,
20
2023-10-09 09:48:50+00:00
24k
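`is_close` and `approx_dict` above standardize one relative tolerance (1e-5) for the two shapes a fairness metric can return, a scalar or a per-subgroup dict. A quick illustration of how they behave:

```python
import math
import pytest

def is_close(a, b):
    return math.isclose(a, b, rel_tol=1e-5)

def approx_dict(d):
    return pytest.approx(d, rel=1e-5)

assert is_close(0.3333333, 1 / 3)                                        # scalar metric
assert {"A": 0.2500001, "B": 0.5} == approx_dict({"A": 0.25, "B": 0.5})  # per-subgroup
```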
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(It...
import os import gradio as gr from app_modules.presets import * from app_modules.overwrites import * from app_modules.utils import * from src.item_base import create_items from src.bidder_base import Bidder from src.human_bidder import HumanBidder from src.auctioneer_base import Auctioneer from auction_workflow import run_auction, make_auction_hash from utils import chunks, reset_state_list
15,496
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:]
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:]
auction_hash = make_auction_hash()
5
2023-10-08 09:30:57+00:00
24k
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an ...
import os import random import torchaudio import typing as tp import numpy as np import torch import subprocess from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.solvers.compression import CompressionSolver from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf
17,665
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p')
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p')
lm = get_lm_model(loaded['xp.cfg'])
7
2023-10-09 09:52:24+00:00
24k
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import os import copy import click import re import numpy as np import torch import torch.utils.data import torch.utils.checkpoint import decord import shutil from glob import glob from typing import Optional,Dict from tqdm.auto import tqdm from omegaconf import OmegaConf from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, ) from diffusers.utils.import_utils import is_xformers_available from transformers import AutoTokenizer, CLIPTextModel from einops import rearrange from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.data.dataset import ImageSequenceDataset from video_diffusion.common.util import get_time_string, get_function_args from video_diffusion.common.logger import get_logger_config_path from video_diffusion.common.image_util import log_train_samples from video_diffusion.common.instantiate_from_config import instantiate_from_config from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger from annotator.util import get_control from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler from RIFEModel.RIFE_HDv3 import Model
20,742
pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids) train_dataloader = torch.utils.data.DataLoader( video_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn, ) train_sample_save_path = os.path.join(logdir, "train_samples.gif") log_train_samples(save_path=train_sample_save_path, train_dataloader=train_dataloader) unet, train_dataloader,controlnet = accelerator.prepare( unet, train_dataloader,controlnet ) weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 print('use fp16') elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # These models are only used for inference, keeping weights in full precision is not required. vae.to(accelerator.device, dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if accelerator.is_main_process: accelerator.init_trackers("video") # , config=vars(args)) logger.info("***** wait to fix the logger path *****") if editing_config is not None and accelerator.is_main_process: validation_sample_logger = P2pSampleLogger(**editing_config, logdir=logdir, source_prompt=dataset_config['prompt']) # validation_sample_logger.log_sample_images( # pipeline=pipeline, # device=accelerator.device, # step=0, # ) def make_data_yielder(dataloader): while True: for batch in dataloader: yield batch accelerator.wait_for_everyone() train_data_yielder = make_data_yielder(train_dataloader) batch = next(train_data_yielder) if editing_config.get('use_invertion_latents', False): # Precompute the latents for this video to align the initial latents in training and test assert batch["images"].shape[0] == 1, "Only support, overfiting on a single video" # we only inference for latents, no training vae.eval() text_encoder.eval() unet.eval() text_embeddings = pipeline._encode_prompt( dataset_config.prompt, device = accelerator.device, num_images_per_prompt = 1, do_classifier_free_guidance = True, negative_prompt=None ) use_inversion_attention = editing_config.get('use_inversion_attention', False)
decord.bridge.set_bridge('torch')
# from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger
# logger = get_logger(__name__)

def collate_fn(examples):
    """Concat a batch of sampled image in dataloader
    """
    batch = {
        "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0),
        "images": torch.stack([example["images"] for example in examples]),
    }
    return batch

def test(
    config: str,
    pretrained_model_path: str,
    control_type:str,
    pretrained_controlnet_model_path :str,
    dataset_config: Dict,
    logdir: str = None,
    editing_config: Optional[Dict] = None,
    test_pipeline_config: Optional[Dict] = None,
    gradient_accumulation_steps: int = 1,
    seed: Optional[int] = None,
    mixed_precision: Optional[str] = "fp16",
    batch_size: int = 1,
    model_config: dict={},
    verbose: bool=True,
    **kwargs
):
    args = get_function_args()

    vr = decord.VideoReader(dataset_config.video_path)
    fps = vr.get_avg_fps()
    duration = len(vr) / fps
    print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame))
    if dataset_config.n_sample_frame <= 50:
        duration = 100
        fps = 10
    sample_index = list(range(0,len(vr), 1))[:dataset_config.n_sample_frame]
    video = vr.get_batch(sample_index)
    video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path)
    video_name = video_name_match.group(2)
    video_frame_folder = os.path.join('data',video_name)
    if os.path.exists(video_frame_folder):
        shutil.rmtree(video_frame_folder)
    os.makedirs(video_frame_folder,exist_ok=True)
    for i in range(video.shape[0]):
        frame = video[i]
        frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg')
        frame = Image.fromarray(frame.numpy().astype(np.uint8))
        frame.save(frame_path)
    dataset_config.update({'path': video_frame_folder} )

    time_string = get_time_string()
    if logdir is None:
        logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '')
        logdir += f"_{time_string}"
    accelerator = Accelerator(
        gradient_accumulation_steps=gradient_accumulation_steps,
        mixed_precision=mixed_precision,
    )
    if accelerator.is_main_process:
        os.makedirs(logdir, exist_ok=True)
        OmegaConf.save(args, os.path.join(logdir, "config.yml"))
    logger = get_logger_config_path(logdir)
    if seed is not None:
        set_seed(seed)

    # Load the tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_path,
        subfolder="tokenizer",
        use_fast=False,
    )
    # Load models and create wrapper for stable diffusion
    text_encoder = CLIPTextModel.from_pretrained(
        pretrained_model_path,
        subfolder="text_encoder",
    )
    vae = AutoencoderKL.from_pretrained(
        pretrained_model_path,
        subfolder="vae",
    )
    # loading the unet raises an error here
    unet = UNetPseudo3DConditionModel.from_2d_model(
        os.path.join(pretrained_model_path, "unet"), model_config=model_config
    )
    controlnet = ControlNetPseudo3DModel.from_2d_model(
        pretrained_controlnet_model_path, model_config=model_config
    )

    if 'target' not in test_pipeline_config:
        test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline'
    scheduler = DDIMScheduler.from_pretrained(
        pretrained_model_path,
        subfolder="scheduler",
    )
    pipeline = instantiate_from_config(
        test_pipeline_config,
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        controlnet=controlnet,
        scheduler=scheduler,
        control_type = control_type,
        editing_type = editing_config.editing_type,
        dilation_kernel = editing_config.dilation_kernel,
        disk_store=kwargs.get('disk_store', False)
    )
    pipeline.scheduler.set_timesteps(editing_config['num_inference_steps'])
    if
editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids) train_dataloader = torch.utils.data.DataLoader( video_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn, ) train_sample_save_path = os.path.join(logdir, "train_samples.gif") log_train_samples(save_path=train_sample_save_path, train_dataloader=train_dataloader) unet, train_dataloader,controlnet = accelerator.prepare( unet, train_dataloader,controlnet ) weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 print('use fp16') elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # These models are only used for inference, keeping weights in full precision is not required. vae.to(accelerator.device, dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("video") # , config=vars(args)) logger.info("***** wait to fix the logger path *****") if editing_config is not None and accelerator.is_main_process: validation_sample_logger = P2pSampleLogger(**editing_config, logdir=logdir, source_prompt=dataset_config['prompt']) # validation_sample_logger.log_sample_images( # pipeline=pipeline, # device=accelerator.device, # step=0, # ) def make_data_yielder(dataloader): while True: for batch in dataloader: yield batch accelerator.wait_for_everyone() train_data_yielder = make_data_yielder(train_dataloader) batch = next(train_data_yielder) if editing_config.get('use_invertion_latents', False): # Precompute the latents for this video to align the initial latents in training and test assert batch["images"].shape[0] == 1, "Only support, overfiting on a single video" # we only inference for latents, no training vae.eval() text_encoder.eval() unet.eval() text_embeddings = pipeline._encode_prompt( dataset_config.prompt, device = accelerator.device, num_images_per_prompt = 1, do_classifier_free_guidance = True, negative_prompt=None ) use_inversion_attention = editing_config.get('use_inversion_attention', False)
apply_control = get_control(control_type)
9
2023-10-09 14:38:28+00:00
24k
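The frame-extraction step in `test()` above (decord reader, stride-1 index list capped at `n_sample_frame`, frames re-encoded as JPEGs for the image-sequence dataset) is reusable on its own; a condensed sketch under the same assumptions (torch bridge active):

```python
import os
import decord
import numpy as np
from PIL import Image

decord.bridge.set_bridge('torch')  # get_batch then returns torch tensors

def dump_frames(video_path: str, out_dir: str, n_frames: int) -> None:
    vr = decord.VideoReader(video_path)
    idx = list(range(0, len(vr), 1))[:n_frames]   # first n_frames, stride 1
    frames = vr.get_batch(idx)                    # (n, H, W, C), uint8
    os.makedirs(out_dir, exist_ok=True)
    for i in range(frames.shape[0]):
        img = Image.fromarray(frames[i].numpy().astype(np.uint8))
        img.save(os.path.join(out_dir, f"frame-{i:04}.jpg"))
```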
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n ...
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.optimizer.anan import Adan from lib.train.optimizer.lion import Lion from lib.utils.misc import is_main_process
20,935
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb")
datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader,
12
2023-10-08 11:44:32+00:00
24k
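The long `if name == ...` chain in `names2datasets` above repeats one decision per dataset: pick the lmdb or raw class, the matching `settings.env` directory attribute, and a split. The same dispatch can be written as a lookup table; a sketch covering two of the names (classes and attribute names as imported in the snippet):

```python
# name -> {backend: (dataset class, settings.env attribute, split)}
REGISTRY = {
    "LASOT": {
        "raw":  (Lasot,      "lasot_dir",      "train"),
        "lmdb": (Lasot_lmdb, "lasot_lmdb_dir", "train"),
    },
    "GOT10K_vottrain": {
        "raw":  (Got10k,      "got10k_dir",      "vottrain"),
        "lmdb": (Got10k_lmdb, "got10k_lmdb_dir", "vottrain"),
    },
}

def names2datasets(name_list, settings, image_loader):
    datasets = []
    for name in name_list:
        cls, env_attr, split = REGISTRY[name]["lmdb" if settings.use_lmdb else "raw"]
        datasets.append(cls(getattr(settings.env, env_attr), split=split,
                            image_loader=image_loader, env_num=settings.env_num))
    return datasets
```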
LiyaoTang/ERDA
utils/trainer.py
[ { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path ...
import os, re, gc, sys, time, pickle, psutil, subprocess import numpy as np import tensorflow as tf from config import log_config from utils.logger import print_dict, print_table from utils.ply import read_ply, write_ply from utils.tester import ModelTester from utils.average_gradients import average_gradients from utils.AdamWOptimizer import AdamWeightDecayOptimizer from utils.logger import setup_logger from utils.scheduler import StepScheduler, LrScheduler from utils.metrics import AverageMeter from utils.tf_graph_builder import GraphBuilder
17,484
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() # PLY reader FILE_DIR = os.path.abspath(__file__) BASE_DIR = os.path.dirname(FILE_DIR) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(ROOT_DIR, 'models')) sys.path.insert(0, os.path.join(ROOT_DIR, 'utils')) DEBUG = False class ModelTrainer: """ get & train the model (potential multi-gpu training) """ def __init__(self, config, verbose=True): self.config = config self.verbose = verbose self.tester = ModelTester(config, verbose=False) def add_summary(self, model): with tf.variable_scope('summary'): summary = model.summary log_content = self.config.log_content if 'var' in log_content: summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs] if 'gard' in log_content: summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs] sum_levels = ['per_step', 'per_log', 'per_epoch'] assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}' for i in range(len(sum_levels)): summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op] self.summary = summary return # Training main method # ------------------------------------------------------------------------------------------------------------------ def train(self): config = self.config with tf.Graph().as_default(): # use one graph # prepare compute graph g = GraphBuilder(config, verbose=self.verbose) ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver model, dataset = g.model, g.dataset self.model = model # printing model parameters if self.verbose: print('\n --------- printing grads {') re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() # PLY reader FILE_DIR = os.path.abspath(__file__) BASE_DIR = os.path.dirname(FILE_DIR) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(ROOT_DIR, 'models')) sys.path.insert(0, os.path.join(ROOT_DIR, 'utils')) DEBUG = False class ModelTrainer: """ get & train the model (potential multi-gpu training) """ def __init__(self, config, verbose=True): self.config = config self.verbose = verbose self.tester = ModelTester(config, verbose=False) def add_summary(self, model): with tf.variable_scope('summary'): summary = model.summary log_content = self.config.log_content if 'var' in log_content: summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs] if 'gard' in log_content: summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs] sum_levels = ['per_step', 'per_log', 'per_epoch'] assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}' for i in range(len(sum_levels)): summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op] self.summary = summary return # Training main method # ------------------------------------------------------------------------------------------------------------------ def train(self): config = self.config with tf.Graph().as_default(): # use one graph # prepare compute graph g = GraphBuilder(config, verbose=self.verbose) ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver model, dataset = g.model, g.dataset self.model = model # printing model parameters if self.verbose: print('\n --------- printing grads {') re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping
print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t')
2
2023-10-13 08:03:07+00:00
24k
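One slip worth flagging in `add_summary` above: the final loop iterates `for i in range(len(sum_levels))` but indexes `summary[lv]`, so `lv` is unbound as written; the intent is to merge each level's list of summary ops into a single op. A sketch of the intended pattern under the same TF1-compat shim:

```python
import tensorflow as tf
if tf.__version__.split('.')[0] == '2':   # same shim as the top of the file
    tf = tf.compat.v1
    tf.disable_v2_behavior()

loss = tf.placeholder(tf.float32, shape=(), name='loss')
summary = {
    'per_step':  [tf.summary.scalar('loss', loss)],
    'per_log':   [tf.summary.histogram('loss_hist', loss)],
    'per_epoch': [],                      # an empty level falls back to a no-op
}
for lv in ('per_step', 'per_log', 'per_epoch'):
    summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else tf.no_op()
```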
bilibini/Lovely_Image_Downloader
py/Python38/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "py/Python38/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-...
import functools import logging import typing import warnings import ssl from types import TracebackType from urllib.parse import urljoin from ._collections import HTTPHeaderDict, RecentlyUsedContainer from ._request_methods import RequestMethods from .connection import ProxyConfig from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, URLSchemeUnknown, ) from .response import BaseHTTPResponse from .util.connection import _TYPE_SOCKET_OPTIONS from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.timeout import Timeout from .util.url import Url, parse_url from typing_extensions import Literal
20,142
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None
key__proxy_config: ProxyConfig | None
3
2023-10-11 09:08:57+00:00
24k
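The `PoolKey` named tuple in this record exists so that equivalent connection options map to the same cached pool. A simplified, hypothetical sketch of that keying idea — this is not urllib3's actual normalizer, and `SimplePoolKey`/`normalize_key` are names invented for illustration:

```python
# Deduplicating connection pools by a normalized, hashable key (sketch).
import typing


class SimplePoolKey(typing.NamedTuple):
    key_scheme: str
    key_host: str
    key_port: typing.Optional[int]


def normalize_key(scheme: str, host: str, port: typing.Optional[int]) -> SimplePoolKey:
    # Scheme and host are case-insensitive, so lower-case them before keying.
    return SimplePoolKey(key_scheme=scheme.lower(), key_host=host.lower(), key_port=port)


pools: dict = {}
key = normalize_key("HTTPS", "Example.COM", 443)
if key not in pools:
    pools[key] = object()  # stand-in for an HTTPSConnectionPool
assert normalize_key("https", "example.com", 443) in pools  # same pool reused
```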
MTgeophysics/mtpy-v2
tests/core/test_mt_stations.py
[ { "identifier": "MTLocation", "path": "mtpy/core/mt_location.py", "snippet": "class MTLocation:\n \"\"\"\n Location for a MT site or point measurement\n\n \"\"\"\n\n def __init__(self, survey_metadata=None, **kwargs):\n\n self.logger = logger\n if survey_metadata is None:\n ...
import unittest import pandas as pd import numpy as np from mtpy.core import MTStations, MTLocation from mtpy import MT
20,616
# -*- coding: utf-8 -*- """ Created on Tue Sep 5 16:27:01 2023 @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class TestMTStationGrid(unittest.TestCase): @classmethod def setUpClass(self): self.east = 243900.352 self.north = 4432069.056898517 self.utm_epsg = 32611 self.center = MTLocation( latitude=40.036594, longitude=-119.978167, utm_epsg=32611, model_east=245900.352, model_north=4436069.057, ) dx = 1000 dy = 2000 count = 1 mt_list = [] for ii in range(5): for jj in range(5):
# -*- coding: utf-8 -*- """ Created on Tue Sep 5 16:27:01 2023 @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class TestMTStationGrid(unittest.TestCase): @classmethod def setUpClass(self): self.east = 243900.352 self.north = 4432069.056898517 self.utm_epsg = 32611 self.center = MTLocation( latitude=40.036594, longitude=-119.978167, utm_epsg=32611, model_east=245900.352, model_north=4436069.057, ) dx = 1000 dy = 2000 count = 1 mt_list = [] for ii in range(5): for jj in range(5):
mt_obj = MT(
2
2023-10-11 22:24:50+00:00
24k
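The test record above lays stations out on a 5×5 grid offset by `dx`/`dy` from a model origin. The exact per-station expression is cropped out of the record, so the arithmetic below is only a plausible reconstruction with plain numpy (no mtpy involved); the origin and spacing values mirror the record:

```python
# Plausible reconstruction of the 5x5 station grid implied by the test:
# station (ii, jj) at origin + (ii * dx, jj * dy) in model coordinates.
import numpy as np

east_origin, north_origin = 245900.352, 4436069.057
dx, dy = 1000, 2000

grid = np.array(
    [(east_origin + ii * dx, north_origin + jj * dy)
     for ii in range(5)
     for jj in range(5)]
)
print(grid.shape)         # (25, 2)
print(grid[0], grid[-1])  # first and last station positions
```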
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResu...
from typing import ( Any, AsyncGenerator, Callable, Dict, Generator, List, Optional, Tuple, Union, ) from uuid import UUID from threading import Thread from rich import print from uuid import uuid4 from litellm.utils import ModelResponse, get_max_tokens from promptmodel.llms.llm import LLM from promptmodel.database.models import ( DeployedPrompt, DeployedFunctionModel, DeployedFunctionModelVersion, ) from promptmodel.database.crud import ( get_deployed_prompts, ) from promptmodel.promptmodel_init import CacheManager from promptmodel.utils.config_utils import read_config, upsert_config from promptmodel.utils.random_utils import select_version_by_ratio from promptmodel.utils import logger from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.utils.token_counting import ( num_tokens_for_messages_for_each, num_tokens_from_functions_input, ) from promptmodel.utils.output_utils import update_dict from promptmodel.apis.base import AsyncAPIClient from promptmodel.types.response import ( LLMResponse, LLMStreamResponse, FunctionModelConfig, ChatModelConfig, UnitConfig, PMDetail, ) from promptmodel.types.request import ChatLogRequest
19,959
inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs) def chat_run( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat(super().run)(session_uuid, **kwargs) def chat_arun( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat(super().arun)(session_uuid, **kwargs) def chat_stream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs) def chat_astream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs) @staticmethod async def fetch_prompts( name, version: Optional[Union[str, int]] = "deploy", ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]: """fetch prompts. Args: name (str): name of FunctionModel Returns: Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail) """ # Check connection activate config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return [], {} elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return [], {} else: if ( "project" in config and "use_cache" in config["project"] and config["project"]["use_cache"] == True and version == "deploy" ): cache_manager = CacheManager() # call update_local API in background task cache_update_thread = Thread( target=cache_manager.cache_update_background_task, args=(config,) ) cache_update_thread.daemon = True cache_update_thread.start() # get prompt from local DB by ratio prompt_rows, version_detail = get_deployed_prompts(name) if prompt_rows is None: return [], {} return [ {"role": prompt.role, "content": prompt.content} for prompt in prompt_rows ], version_detail else: try: config_list = await AsyncAPIClient.execute( method="GET", path="/function_model_versions", params={"function_model_name": name, "version": version}, use_cli_key=False, ) config_list = config_list.json() except Exception as e: raise e function_model_versions = [ x["function_model_version"] for x in config_list ] if version == "deploy": for version in function_model_versions: if version["is_published"] is True: version["ratio"] = 1.0
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) ) return wrapper def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call async_gen with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) log_uuid = str(uuid4()) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None api_response: Optional[ModelResponse] = None async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item # # add string_cache in model_response # if api_response: # if "message" not in api_response.choices[0]: # api_response.choices[0].message = {} # if "content" not in api_response.choices[0].message: # api_response.choices[0].message["content"] = string_cache # api_response.choices[0].message["role"] = "assistant" metadata = { "error": error_occurs, "error_log": error_log, } await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], 
inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) # raise Exception("error_log") return wrapper def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) ) else: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) # messages, model, uuid = self._fetch_prompts() call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) else: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = 
llm_response.api_response log_uuid = str(uuid4()) run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( message=llm_response.api_response.choices[ 0 ].message.model_dump(), uuid=log_uuid, metadata=metadata, api_response=api_response, ) ], ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = llm_response.api_response log_uuid = str(uuid4()) await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=llm_response.api_response.choices[ 0 ].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) ) return wrapper def 
_wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) return wrapper def _prepare_call_args( self, prompts: List[Dict[str, str]], version_detail: Dict[str, Any], inputs: Dict[str, Any], kwargs, ): stringified_inputs = {key: str(value) for key, value in inputs.items()} messages = [ { "content": prompt["content"].format(**stringified_inputs), "role": prompt["role"], } for prompt in prompts ] call_args = { "messages": messages, "model": version_detail["model"] if version_detail else None, "parsing_type": version_detail["parsing_type"] if version_detail else None, "output_keys": version_detail["output_keys"] if version_detail else None, } if call_args["parsing_type"] is None: del call_args["parsing_type"] del call_args["output_keys"] if "functions" in kwargs: call_args["functions"] = kwargs["functions"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] return call_args def _prepare_call_args_for_chat( self, messages: List[Dict[str, Any]], version_detail: Dict[str, Any], kwargs, ): call_args = {} token_per_tools = 0 if "functions" in kwargs: call_args["functions"] = kwargs["functions"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["functions"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) if "tools" in kwargs: call_args["tools"] = kwargs["tools"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["tools"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) # truncate messages to make length <= model's max length model_max_tokens = get_max_tokens( model=version_detail["model"] if version_detail else "gpt-3.5-turbo" ) token_per_messages = num_tokens_for_messages_for_each( messages, version_detail["model"] ) token_limit_exceeded = ( sum(token_per_messages) + token_per_tools ) - model_max_tokens if token_limit_exceeded > 0: while token_limit_exceeded > 0: # erase the second oldest message (first one is system prompt, so it should not be erased) if len(messages) == 1: # if there is only one message, Error cannot be solved. 
Just call LLM and get error response break token_limit_exceeded -= token_per_messages[1] del messages[1] del token_per_messages[1] call_args["messages"] = messages call_args["model"] = version_detail["model"] if version_detail else None if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] return call_args async def _async_log_to_cloud( self, version_uuid: str, log_uuid: str, inputs: Optional[Dict] = None, api_response: Optional[ModelResponse] = None, parsed_outputs: Optional[Dict] = None, metadata: Optional[Dict] = None, ): config = read_config() if ( "project" in config and "mask_inputs" in config["project"] and config["project"]["mask_inputs"] == True ): inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()} # Perform the logging asynchronously if api_response: api_response_dict = api_response.model_dump() api_response_dict["response_ms"] = api_response._response_ms api_response_dict["_response_ms"] = api_response._response_ms else: api_response_dict = None run_log_request_body = { "uuid": log_uuid, "api_response": api_response_dict, "inputs": inputs, "parsed_outputs": parsed_outputs, "metadata": metadata, } res = await AsyncAPIClient.execute( method="POST", path="/run_log", params={ "version_uuid": version_uuid, }, json=run_log_request_body, use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to log to cloud: {res.json()}[/red]"); if self.unit_config: res_connect = await AsyncAPIClient.execute( method="POST", path="/unit/connect", json={ "unit_log_uuid": self.unit_config.log_uuid, "run_log_uuid": log_uuid, }, use_cli_key=False, ) if res_connect.status_code != 200: print(f"[red]Failed to connect prompt component to run log: {res_connect.json()}[/red]") return res async def _async_chat_log_to_cloud( self, session_uuid: str, version_uuid: Optional[str] = None, chat_log_request_list: List[ChatLogRequest] = [], ): # Perform the logging asynchronously res = await AsyncAPIClient.execute( method="POST", path="/chat_log", params={ "session_uuid": session_uuid, "version_uuid": version_uuid, }, json=[r.model_dump() for r in chat_log_request_list], use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to log to cloud: {res.json()}[/red]") return res async def _async_make_session_cloud( self, session_uuid: str, version_uuid: Optional[str] = None, ): # Perform the logging asynchronously res = await AsyncAPIClient.execute( method="POST", path="/make_session", params={ "session_uuid": session_uuid, "version_uuid": version_uuid, }, use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to make ChatSession in cloud: {res.json()}[/red]") return res def make_kwargs(self, **kwargs): res = {} for key, value in kwargs.items(): if value is not None: res[key] = value return res def run( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run)(inputs, **kwargs) def arun( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun)(inputs, **kwargs) def stream( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = 
None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream)(inputs, **kwargs) def astream( self, inputs: Optional[Dict[str, Any]] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream)(inputs, **kwargs) def run_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run_and_parse)(inputs, **kwargs) def arun_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs) def stream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs) def astream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs) def chat_run( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat(super().run)(session_uuid, **kwargs) def chat_arun( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat(super().arun)(session_uuid, **kwargs) def chat_stream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs) def chat_astream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs) @staticmethod async def fetch_prompts( name, version: Optional[Union[str, int]] = "deploy", ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]: """fetch prompts. 
Args: name (str): name of FunctionModel Returns: Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail) """ # Check connection activate config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return [], {} elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return [], {} else: if ( "project" in config and "use_cache" in config["project"] and config["project"]["use_cache"] == True and version == "deploy" ): cache_manager = CacheManager() # call update_local API in background task cache_update_thread = Thread( target=cache_manager.cache_update_background_task, args=(config,) ) cache_update_thread.daemon = True cache_update_thread.start() # get prompt from local DB by ratio prompt_rows, version_detail = get_deployed_prompts(name) if prompt_rows is None: return [], {} return [ {"role": prompt.role, "content": prompt.content} for prompt in prompt_rows ], version_detail else: try: config_list = await AsyncAPIClient.execute( method="GET", path="/function_model_versions", params={"function_model_name": name, "version": version}, use_cli_key=False, ) config_list = config_list.json() except Exception as e: raise e function_model_versions = [ x["function_model_version"] for x in config_list ] if version == "deploy": for version in function_model_versions: if version["is_published"] is True: version["ratio"] = 1.0
selected_version = select_version_by_ratio(function_model_versions)
8
2023-10-09 03:35:44+00:00
24k
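All of `LLMProxy`'s `_wrap_*` helpers share one shape: resolve the deployed prompt/version, call the wrapped method with prepared arguments, then ship a log tagged with a fresh UUID. A stripped-down sketch of that wrapper shape — `fetch_config` and `log_to_cloud` are illustrative stand-ins, not promptmodel APIs:

```python
# Generic shape of the proxy wrappers: resolve config, call, log (sketch).
from typing import Any, Callable, Dict
from uuid import uuid4


def fetch_config(name: str) -> Dict[str, Any]:
    # Stand-in for fetch_prompts(): returns the deployed version details.
    return {"model": "some-model", "uuid": "version-uuid"}


def log_to_cloud(log_uuid: str, version_uuid: str, response: Any) -> None:
    # Stand-in for _async_log_to_cloud().
    print(f"logged {log_uuid} for {version_uuid}: {response!r}")


def wrap_method(name: str, method: Callable[..., Any]) -> Callable[..., Any]:
    def wrapper(inputs: Dict[str, Any], **kwargs: Any) -> Any:
        version = fetch_config(name)                  # resolve deployed version
        response = method(inputs, model=version["model"], **kwargs)
        log_to_cloud(str(uuid4()), version["uuid"], response)  # fire the log
        return response
    return wrapper


run = wrap_method("demo", lambda inputs, model: f"{model} -> {inputs}")
print(run({"q": "hi"}))
```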
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n...
import os import time import numpy as np import torch import torch.multiprocessing import torch.multiprocessing as mp from src import config from src.Mapper import Mapper from src.Tracker import Tracker from src.utils.datasets import get_dataset from src.utils.Logger import Logger from src.utils.Mesher import Mesher from src.utils.Renderer import Renderer
20,631
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass self.frame_reader = get_dataset(cfg, args, self.scale) self.n_img = len(self.frame_reader) self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4)) self.estimate_c2w_list.share_memory_() dataset = self.cfg['data']['dataset'] scene_id = self.cfg['data']['id'] self.scene_id = scene_id print(scene_id) # load tsdf grid if dataset == 'scannet': self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt') elif dataset == 'replica': self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt') self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device']) self.tsdf_volume_shared.share_memory_() # load tsdf grid bound if dataset == 'scannet': self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt') elif dataset == 'replica': self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt') self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device']) self.tsdf_bnds.share_memory_() self.vol_bnds = self.tsdf_bnds self.vol_bnds.share_memory_() self.gt_c2w_list = torch.zeros((self.n_img, 4, 4)) self.gt_c2w_list.share_memory_() self.idx = torch.zeros((1)).int() self.idx.share_memory_() self.mapping_first_frame = torch.zeros((1)).int() self.mapping_first_frame.share_memory_() # the id of the newest frame Mapper is processing self.mapping_idx = torch.zeros((1)).int() self.mapping_idx.share_memory_() self.mapping_cnt = torch.zeros((1)).int() # counter for mapping self.mapping_cnt.share_memory_() for key, val in self.shared_c.items(): val = val.to(self.cfg['mapping']['device']) val.share_memory_() self.shared_c[key] = val self.shared_decoders = self.shared_decoders.to( self.cfg['mapping']['device']) self.shared_decoders.share_memory() self.renderer = Renderer(cfg, args, self) self.mesher = Mesher(cfg, args, self)
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass self.frame_reader = get_dataset(cfg, args, self.scale) self.n_img = len(self.frame_reader) self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4)) self.estimate_c2w_list.share_memory_() dataset = self.cfg['data']['dataset'] scene_id = self.cfg['data']['id'] self.scene_id = scene_id print(scene_id) # load tsdf grid if dataset == 'scannet': self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt') elif dataset == 'replica': self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt') self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device']) self.tsdf_volume_shared.share_memory_() # load tsdf grid bound if dataset == 'scannet': self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt') elif dataset == 'replica': self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt') self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device']) self.tsdf_bnds.share_memory_() self.vol_bnds = self.tsdf_bnds self.vol_bnds.share_memory_() self.gt_c2w_list = torch.zeros((self.n_img, 4, 4)) self.gt_c2w_list.share_memory_() self.idx = torch.zeros((1)).int() self.idx.share_memory_() self.mapping_first_frame = torch.zeros((1)).int() self.mapping_first_frame.share_memory_() # the id of the newest frame Mapper is processing self.mapping_idx = torch.zeros((1)).int() self.mapping_idx.share_memory_() self.mapping_cnt = torch.zeros((1)).int() # counter for mapping self.mapping_cnt.share_memory_() for key, val in self.shared_c.items(): val = val.to(self.cfg['mapping']['device']) val.share_memory_() self.shared_c[key] = val self.shared_decoders = self.shared_decoders.to( self.cfg['mapping']['device']) self.shared_decoders.share_memory() self.renderer = Renderer(cfg, args, self) self.mesher = Mesher(cfg, args, self)
self.logger = Logger(cfg, args, self)
4
2023-10-13 00:49:57+00:00
24k
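`DF_Prior` shares state between the mapper and tracker processes by moving tensors into shared memory under the `spawn` start method. A minimal runnable sketch of that PyTorch pattern; the worker here simply increments a shared counter, standing in for the mapper updating `mapping_idx`:

```python
# Shared-tensor pattern used above: spawn start method + share_memory_()
# makes in-place updates from a child process visible to the parent.
import torch
import torch.multiprocessing as mp


def worker(counter: torch.Tensor) -> None:
    counter += 1  # in-place update on shared storage


if __name__ == "__main__":
    try:
        mp.set_start_method("spawn", force=True)
    except RuntimeError:
        pass

    counter = torch.zeros(1).int()
    counter.share_memory_()

    p = mp.Process(target=worker, args=(counter,))
    p.start()
    p.join()
    print(counter.item())  # 1
```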
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/linear_model/_base.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be se...
import numbers import warnings import numpy as np import scipy.sparse as sp from abc import ABCMeta, abstractmethod from numbers import Integral from scipy import linalg, optimize, sparse from scipy.sparse.linalg import lsqr from scipy.special import expit from ..base import ( BaseEstimator, ClassifierMixin, MultiOutputMixin, RegressorMixin, _fit_context, ) from ..preprocessing._data import _is_constant_feature from ..utils import check_array, check_random_state from ..utils._array_api import get_namespace from ..utils._seq_dataset import ( ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64, ) from ..utils.extmath import _incremental_mean_and_var, safe_sparse_dot from ..utils.parallel import Parallel, delayed from ..utils.sparsefuncs import inplace_column_scale, mean_variance_axis from ..utils.validation import FLOAT_DTYPES, _check_sample_weight, check_is_fitted
17,924
.. versionadded:: 0.24 Attributes ---------- coef_ : array of shape (n_features, ) or (n_targets, n_features) Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features. rank_ : int Rank of matrix `X`. Only available when `X` is dense. singular_ : array of shape (min(X, y),) Singular values of `X`. Only available when `X` is dense. intercept_ : float or array of shape (n_targets,) Independent term in the linear model. Set to 0.0 if `fit_intercept = False`. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- Ridge : Ridge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients with l2 regularization. Lasso : The Lasso is a linear model that estimates sparse coefficients with l1 regularization. ElasticNet : Elastic-Net is a linear regression model trained with both l1 and l2 -norm regularization of the coefficients. Notes ----- From the implementation point of view, this is just plain Ordinary Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares (scipy.optimize.nnls) wrapped as a predictor object. Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) >>> # y = 1 * x_0 + 2 * x_1 + 3 >>> y = np.dot(X, np.array([1, 2])) + 3 >>> reg = LinearRegression().fit(X, y) >>> reg.score(X, y) 1.0 >>> reg.coef_ array([1., 2.]) >>> reg.intercept_ 3.0... >>> reg.predict(np.array([[3, 5]])) array([16.]) """ _parameter_constraints: dict = { "fit_intercept": ["boolean"], "copy_X": ["boolean"], "n_jobs": [None, Integral], "positive": ["boolean"], } def __init__( self, *, fit_intercept=True, copy_X=True, n_jobs=None, positive=False, ): self.fit_intercept = fit_intercept self.copy_X = copy_X self.n_jobs = n_jobs self.positive = positive @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns ------- self : object Fitted Estimator. """ n_jobs_ = self.n_jobs accept_sparse = False if self.positive else ["csr", "csc", "coo"] X, y = self._validate_data( X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True ) has_sw = sample_weight is not None if has_sw:
""" Generalized Linear Models. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Vincent Michel <vincent.michel@inria.fr> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck # Maryan Morel <maryan.morel@polytechnique.edu> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # Maria Telenczuk <https://github.com/maikia> # License: BSD 3 clause # TODO: bayesian_ridge_regression and bayesian_regression_ard # should be squashed into its respective objects. SPARSE_INTERCEPT_DECAY = 0.01 # For sparse data intercept updates are scaled by this decay factor to avoid # intercept oscillation. # TODO(1.4): remove # parameter 'normalize' should be removed from linear models def _deprecate_normalize(normalize, estimator_name): """Normalize is to be deprecated from linear models and a use of a pipeline with a StandardScaler is to be recommended instead. Here the appropriate message is selected to be displayed to the user depending on the default normalize value (as it varies between the linear models and normalize value selected by the user). Parameters ---------- normalize : bool, normalize value passed by the user estimator_name : str name of the linear estimator which calls this function. The name will be used for writing the deprecation warnings Returns ------- normalize : bool, normalize value which should further be used by the estimator at this stage of the depreciation process Notes ----- This function should be completely removed in 1.4. """ if normalize not in [True, False, "deprecated"]: raise ValueError( "Leave 'normalize' to its default value or set it to True or False" ) if normalize == "deprecated": _normalize = False else: _normalize = normalize pipeline_msg = ( "If you wish to scale the data, use Pipeline with a StandardScaler " "in a preprocessing stage. To reproduce the previous behavior:\n\n" "from sklearn.pipeline import make_pipeline\n\n" "model = make_pipeline(StandardScaler(with_mean=False), " f"{estimator_name}())\n\n" "If you wish to pass a sample_weight parameter, you need to pass it " "as a fit parameter to each step of the pipeline as follows:\n\n" "kwargs = {s[0] + '__sample_weight': sample_weight for s " "in model.steps}\n" "model.fit(X, y, **kwargs)\n\n" ) alpha_msg = "" if "LassoLars" in estimator_name: alpha_msg = "Set parameter alpha to: original_alpha * np.sqrt(n_samples). " if normalize != "deprecated" and normalize: warnings.warn( "'normalize' was deprecated in version 1.2 and will be removed in 1.4.\n" + pipeline_msg + alpha_msg, FutureWarning, ) elif not normalize: warnings.warn( ( "'normalize' was deprecated in version 1.2 and will be " "removed in 1.4. " "Please leave the normalize parameter to its default value to " "silence this warning. The default behavior of this estimator " "is to not do any normalization. If normalization is needed " "please use sklearn.preprocessing.StandardScaler instead." ), FutureWarning, ) return _normalize def make_dataset(X, y, sample_weight, random_state=None): """Create ``Dataset`` abstraction for sparse and dense inputs. This also returns the ``intercept_decay`` which is different for sparse datasets. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data y : array-like, shape (n_samples, ) Target values. 
sample_weight : numpy array of shape (n_samples,) The weight of each sample random_state : int, RandomState instance or None (default) Determines random number generation for dataset random sampling. It is not used for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- dataset The ``Dataset`` abstraction intercept_decay The intercept decay """ rng = check_random_state(random_state) # seed should never be 0 in SequentialDataset64 seed = rng.randint(1, np.iinfo(np.int32).max) if X.dtype == np.float32: CSRData = CSRDataset32 ArrayData = ArrayDataset32 else: CSRData = CSRDataset64 ArrayData = ArrayDataset64 if sp.issparse(X): dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed) intercept_decay = SPARSE_INTERCEPT_DECAY else: X = np.ascontiguousarray(X) dataset = ArrayData(X, y, sample_weight, seed=seed) intercept_decay = 1.0 return dataset, intercept_decay def _preprocess_data( X, y, fit_intercept, normalize=False, copy=True, copy_y=True, sample_weight=None, check_input=True, ): """Center and scale data. Centers data to have mean zero along axis 0. If fit_intercept=False or if the X is a sparse matrix, no centering is done, but normalization can still be applied. The function returns the statistics necessary to reconstruct the input data, which are X_offset, y_offset, X_scale, such that the output X = (X - X_offset) / X_scale X_scale is the L2 norm of X - X_offset. If sample_weight is not None, then the weighted mean of X and y is zero, and not the mean itself. If fit_intercept=True, the mean, eventually weighted, is returned, independently of whether X was centered (option used for optimization with sparse data in coordinate_descend). This is here because nearly all linear models will want their data to be centered. This function also systematically makes y consistent with X.dtype Returns ------- X_out : {ndarray, sparse matrix} of shape (n_samples, n_features) If copy=True a copy of the input X is triggered, otherwise operations are inplace. If input X is dense, then X_out is centered. If normalize is True, then X_out is rescaled (dense and sparse case) y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets) Centered version of y. Likely performed inplace on input y. X_offset : ndarray of shape (n_features,) The mean per column of input X. y_offset : float or ndarray of shape (n_features,) X_scale : ndarray of shape (n_features,) The standard deviation per column of input X. """ if isinstance(sample_weight, numbers.Number): sample_weight = None if sample_weight is not None: sample_weight = np.asarray(sample_weight) if check_input: X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES) y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False) else: y = y.astype(X.dtype, copy=copy_y) if copy: if sp.issparse(X): X = X.copy() else: X = X.copy(order="K") if fit_intercept: if sp.issparse(X): X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight) else: if normalize: X_offset, X_var, _ = _incremental_mean_and_var( X, last_mean=0.0, last_variance=0.0, last_sample_count=0.0, sample_weight=sample_weight, ) else: X_offset = np.average(X, axis=0, weights=sample_weight) X_offset = X_offset.astype(X.dtype, copy=False) X -= X_offset if normalize: X_var = X_var.astype(X.dtype, copy=False) # Detect constant features on the computed variance, before taking # the np.sqrt. 
Otherwise constant features cannot be detected with # sample weights. constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0]) if sample_weight is None: X_var *= X.shape[0] else: X_var *= sample_weight.sum() X_scale = np.sqrt(X_var, out=X_var) X_scale[constant_mask] = 1.0 if sp.issparse(X): inplace_column_scale(X, 1.0 / X_scale) else: X /= X_scale else: X_scale = np.ones(X.shape[1], dtype=X.dtype) y_offset = np.average(y, axis=0, weights=sample_weight) y -= y_offset else: X_offset = np.zeros(X.shape[1], dtype=X.dtype) X_scale = np.ones(X.shape[1], dtype=X.dtype) if y.ndim == 1: y_offset = X.dtype.type(0) else: y_offset = np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_scale # TODO: _rescale_data should be factored into _preprocess_data. # Currently, the fact that sag implements its own way to deal with # sample_weight makes the refactoring tricky. def _rescale_data(X, y, sample_weight, inplace=False): """Rescale data sample-wise by square root of sample_weight. For many linear models, this enables easy support for sample_weight because (y - X w)' S (y - X w) with S = diag(sample_weight) becomes ||y_rescaled - X_rescaled w||_2^2 when setting y_rescaled = sqrt(S) y X_rescaled = sqrt(S) X Returns ------- X_rescaled : {array-like, sparse matrix} y_rescaled : {array-like, sparse matrix} """ # Assume that _validate_data and _check_sample_weight have been called by # the caller. n_samples = X.shape[0] sample_weight_sqrt = np.sqrt(sample_weight) if sp.issparse(X) or sp.issparse(y): sw_matrix = sparse.dia_matrix( (sample_weight_sqrt, 0), shape=(n_samples, n_samples) ) if sp.issparse(X): X = safe_sparse_dot(sw_matrix, X) else: if inplace: X *= sample_weight_sqrt[:, np.newaxis] else: X = X * sample_weight_sqrt[:, np.newaxis] if sp.issparse(y): y = safe_sparse_dot(sw_matrix, y) else: if inplace: if y.ndim == 1: y *= sample_weight_sqrt else: y *= sample_weight_sqrt[:, np.newaxis] else: if y.ndim == 1: y = y * sample_weight_sqrt else: y = y * sample_weight_sqrt[:, np.newaxis] return X, y, sample_weight_sqrt class LinearModel(BaseEstimator, metaclass=ABCMeta): """Base class for Linear Models""" @abstractmethod def fit(self, X, y): """Fit model.""" def _decision_function(self, X): check_is_fitted(self) X = self._validate_data(X, accept_sparse=["csr", "csc", "coo"], reset=False) return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ def predict(self, X): """ Predict using the linear model. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Samples. Returns ------- C : array, shape (n_samples,) Returns predicted values. """ return self._decision_function(X) def _set_intercept(self, X_offset, y_offset, X_scale): """Set the intercept_""" if self.fit_intercept: # We always want coef_.dtype=X.dtype. For instance, X.dtype can differ from # coef_.dtype if warm_start=True. self.coef_ = np.divide(self.coef_, X_scale, dtype=X_scale.dtype) self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T) else: self.intercept_ = 0.0 def _more_tags(self): return {"requires_y": True} # XXX Should this derive from LinearModel? It should be a mixin, not an ABC. # Maybe the n_features checking can be moved to LinearModel. class LinearClassifierMixin(ClassifierMixin): """Mixin for linear classifiers. Handles prediction for sparse and dense X. """ def decision_function(self, X): """ Predict confidence scores for samples. The confidence score for a sample is proportional to the signed distance of that sample to the hyperplane. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the confidence scores. Returns ------- scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Confidence scores per `(n_samples, n_classes)` combination. In the binary case, confidence score for `self.classes_[1]` where >0 means this class would be predicted. """ check_is_fitted(self) xp, _ = get_namespace(X) X = self._validate_data(X, accept_sparse="csr", reset=False) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return xp.reshape(scores, (-1,)) if scores.shape[1] == 1 else scores def predict(self, X): """ Predict class labels for samples in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the predictions. Returns ------- y_pred : ndarray of shape (n_samples,) Vector containing the class labels for each sample. """ xp, _ = get_namespace(X) scores = self.decision_function(X) if len(scores.shape) == 1: indices = xp.astype(scores > 0, int) else: indices = xp.argmax(scores, axis=1) return xp.take(self.classes_, indices) def _predict_proba_lr(self, X): """Probability estimation for OvR logistic regression. Positive class probabilities are computed as 1. / (1. + np.exp(-self.decision_function(X))); multiclass is handled by normalizing that over all classes. """ prob = self.decision_function(X) expit(prob, out=prob) if prob.ndim == 1: return np.vstack([1 - prob, prob]).T else: # OvR normalization, like LibLinear's predict_probability prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob class SparseCoefMixin: """Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this. """ def densify(self): """ Convert coefficient matrix to dense array format. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the default format of ``coef_`` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns ------- self Fitted estimator. """ msg = "Estimator, %(name)s, must be fitted before densifying." check_is_fitted(self, msg=msg) if sp.issparse(self.coef_): self.coef_ = self.coef_.toarray() return self def sparsify(self): """ Convert coefficient matrix to sparse format. Converts the ``coef_`` member to a scipy.sparse matrix, which for L1-regularized models can be much more memory- and storage-efficient than the usual numpy.ndarray representation. The ``intercept_`` member is not converted. Returns ------- self Fitted estimator. Notes ----- For non-sparse models, i.e. when there are not many zeros in ``coef_``, this may actually *increase* memory usage, so use this method with care. A rule of thumb is that the number of zero elements, which can be computed with ``(coef_ == 0).sum()``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify. """ msg = "Estimator, %(name)s, must be fitted before sparsifying." check_is_fitted(self, msg=msg) self.coef_ = sp.csr_matrix(self.coef_) return self class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel): """ Ordinary least squares Linear Regression. 
LinearRegression fits a linear model with coefficients w = (w1, ..., wp) to minimize the residual sum of squares between the observed targets in the dataset, and the targets predicted by the linear approximation. Parameters ---------- fit_intercept : bool, default=True Whether to calculate the intercept for this model. If set to False, no intercept will be used in calculations (i.e. data is expected to be centered). copy_X : bool, default=True If True, X will be copied; else, it may be overwritten. n_jobs : int, default=None The number of jobs to use for the computation. This will only provide speedup in case of sufficiently large problems, that is if firstly `n_targets > 1` and secondly `X` is sparse or if `positive` is set to `True`. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. positive : bool, default=False When set to ``True``, forces the coefficients to be positive. This option is only supported for dense arrays. .. versionadded:: 0.24 Attributes ---------- coef_ : array of shape (n_features, ) or (n_targets, n_features) Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features. rank_ : int Rank of matrix `X`. Only available when `X` is dense. singular_ : array of shape (min(X, y),) Singular values of `X`. Only available when `X` is dense. intercept_ : float or array of shape (n_targets,) Independent term in the linear model. Set to 0.0 if `fit_intercept = False`. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- Ridge : Ridge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients with l2 regularization. Lasso : The Lasso is a linear model that estimates sparse coefficients with l1 regularization. ElasticNet : Elastic-Net is a linear regression model trained with both l1 and l2 -norm regularization of the coefficients. Notes ----- From the implementation point of view, this is just plain Ordinary Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares (scipy.optimize.nnls) wrapped as a predictor object. Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) >>> # y = 1 * x_0 + 2 * x_1 + 3 >>> y = np.dot(X, np.array([1, 2])) + 3 >>> reg = LinearRegression().fit(X, y) >>> reg.score(X, y) 1.0 >>> reg.coef_ array([1., 2.]) >>> reg.intercept_ 3.0... >>> reg.predict(np.array([[3, 5]])) array([16.]) """ _parameter_constraints: dict = { "fit_intercept": ["boolean"], "copy_X": ["boolean"], "n_jobs": [None, Integral], "positive": ["boolean"], } def __init__( self, *, fit_intercept=True, copy_X=True, n_jobs=None, positive=False, ): self.fit_intercept = fit_intercept self.copy_X = copy_X self.n_jobs = n_jobs self.positive = positive @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. 
y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns ------- self : object Fitted Estimator. """ n_jobs_ = self.n_jobs accept_sparse = False if self.positive else ["csr", "csc", "coo"] X, y = self._validate_data( X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True ) has_sw = sample_weight is not None if has_sw:
next_line: sample_weight = _check_sample_weight(
gold_snippet_index: 16
created_at: 2023-10-07 13:19:48+00:00
level: 24k
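A hedged aside on the row above (not part of the dataset): the `_rescale_data` docstring claims that (y - X w)' S (y - X w) with S = diag(sample_weight) equals ||y_rescaled - X_rescaled w||_2^2 once X and y are scaled by sqrt(sample_weight). A minimal NumPy sketch that checks this identity numerically; all names here are local to the sketch:

    # Verify: weighted RSS == plain RSS on sqrt-weight-rescaled data.
    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(8, 3))
    y = rng.normal(size=8)
    w = rng.uniform(0.5, 2.0, size=8)   # sample weights (diag of S)
    coef = rng.normal(size=3)

    resid = y - X @ coef
    weighted_rss = resid @ np.diag(w) @ resid   # (y - Xw)' S (y - Xw)

    sw = np.sqrt(w)
    X_rescaled = X * sw[:, np.newaxis]          # sqrt(S) X
    y_rescaled = y * sw                         # sqrt(S) y
    plain_rss = np.sum((y_rescaled - X_rescaled @ coef) ** 2)

    assert np.allclose(weighted_rss, plain_rss)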
repo_name: hellloxiaotian/KDNet
file_path: train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse import logging import math import os import random import time import numpy as np import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import torch.utils.data import yaml import test # import test.py to get mAP after each epoch from copy import deepcopy from pathlib import Path from threading import Thread from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from models.experimental import attempt_load from models.experimental import attempt_loadv5 from models.experimental import attempt_load_zxy from models.yolo import Model from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss, ComputeLossOTA from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume from utils.distill_utils import getMask, compute_mask_loss
token_num: 20,371
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] # Freeze freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if hasattr(v, 'im'): if hasattr(v.im, 'implicit'): pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 
'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else:
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] # Freeze freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if hasattr(v, 'im'): if hasattr(v.im, 'implicit'): pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else:
next_line: lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
gold_snippet_index: 6
created_at: 2023-10-08 13:05:58+00:00
level: 24k
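A hedged aside on the row above (not part of the dataset): the train() snippet builds three optimizer parameter groups so that biases and BatchNorm weights are exempt from weight decay while other weights are decayed. A minimal sketch of that grouping on a toy model; the hyperparameter values are placeholders, not the repo's:

    # Three-group optimizer setup: pg0 = no-decay weights (BatchNorm),
    # pg1 = decayed weights (e.g. conv), pg2 = biases (no decay).
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    pg0, pg1, pg2 = [], [], []
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)

    optimizer = optim.SGD(pg0, lr=0.01, momentum=0.937, nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': 5e-4})
    optimizer.add_param_group({'params': pg2})  # biases: no weight decay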
repo_name: falesiani/torch_ga
file_path: tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multip...
import unittest as ut import h5py import torch import torch.nn as nn import torch.nn.functional as F import torch from io import BytesIO from torch_ga.layers import ( GeometricProductDense, GeometricSandwichProductDense, GeometricProductElementwise, GeometricSandwichProductElementwise, GeometricProductConv1D, GeometricAlgebraExp, GeometricToTensor, GeometricToTensorWithKind, TensorToGeometric, TensorWithKindToGeometric, ) from torch_ga.blades import BladeKind from torch_ga import GeometricAlgebra
token_num: 17,289
torch.manual_seed(0) class TestKerasLayers(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) print(f"assertTensorsEqual(a={a},b={b})") assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b) def test_tensor_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) def test_tensor_with_kind_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_kind_to_geom_layer = TensorWithKindToGeometric( sta, BladeKind.VECTOR) self.assertTensorsEqual( tensor_kind_to_geom_layer(tensor), gt_geom_tensor) def test_geometric_to_tensor(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) def test_geometric_to_tensor_with_kind(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4]
torch.manual_seed(0) class TestKerasLayers(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) print(f"assertTensorsEqual(a={a},b={b})") assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b) def test_tensor_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) def test_tensor_with_kind_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_kind_to_geom_layer = TensorWithKindToGeometric( sta, BladeKind.VECTOR) self.assertTensorsEqual( tensor_kind_to_geom_layer(tensor), gt_geom_tensor) def test_geometric_to_tensor(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) def test_geometric_to_tensor_with_kind(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4]
next_line: geom_prod_layer = GeometricProductDense(
gold_snippet_index: 0
created_at: 2023-10-07 13:34:07+00:00
level: 24k
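A hedged aside on the row above (not part of the dataset): the tests embed a plain (batch, 4) tensor into the 16-component spacetime-algebra multivector at vector blade indices 1..4, then read it back. A minimal round-trip sketch using only layers imported in the row; it assumes the torch_ga package is installed and its behavior is as inferred from the test code above:

    # Round trip: tensor -> multivector at vector blades -> tensor.
    import torch
    from torch_ga import GeometricAlgebra
    from torch_ga.layers import TensorToGeometric, GeometricToTensor

    sta = GeometricAlgebra([1, -1, -1, -1])   # spacetime algebra metric
    vector_blade_indices = [1, 2, 3, 4]

    to_geom = TensorToGeometric(sta, vector_blade_indices)
    to_tensor = GeometricToTensor(sta, vector_blade_indices)

    x = torch.ones([32, 4])
    geom = to_geom(x)                          # shape (32, 16): 2**4 blades
    assert torch.all(to_tensor(geom) == x)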
repo_name: Significant-Gravitas/autostandup
file_path: bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
token_num: 15,369
await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to 
MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager): if member.weekly_checkins == 5: return # If already completed 5 check-ins, do nothing user = bot.get_user(member.discord_id) if user: # Notify the admin that a status request is being sent admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"Status request sent to {member.name}.") # Cancel the previous task if it exists ongoing_task: Task = ongoing_status_requests.get(member.discord_id) if ongoing_task: ongoing_task.cancel() # Retrieve all commit messages for the member commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member) if not commit_messages: summarized_report = "You have no commits for the previous working day." msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." else: summarized_report = await updates_manager.summarize_technical_updates(commit_messages) msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." raw_updates = summarized_report # Send initial message and wait for reaction await user.send( f"# Good morning {member.name}, time for your daily status update!\n" f"### I'm first going to check your commit messages and try to build a technical report for you.\n" f"### Next I will ask you for any non-technical updates from your previous work day.\n" f"### Finally I will ask you what you plan to work on today." 
) sent_message = await user.send(msg) await sent_message.add_reaction(THUMBS_UP_EMOJI) await sent_message.add_reaction(PENCIL_EMOJI) await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) def check(m) -> bool: return m.author == user and isinstance(m.channel, DMChannel) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, reactor = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await sent_message.remove_reaction(emoji, bot.user) while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: if str(reaction.emoji) == PENCIL_EMOJI: await user.send("What would you like me to change?") # Store the new wait_for message (feedback) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task feedback = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the feedback # Send original + feedback to LLM for reformatting summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content) elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI: await user.send("Please submit your technical report directly.") # Store the new wait_for message (report submission) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task direct_report = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the report summarized_report = direct_report.content break # Exit the while loop as the user has submitted their report directly msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." last_sent_message = await send_long_message(user, msg) if last_sent_message: await last_sent_message.add_reaction(THUMBS_UP_EMOJI) await last_sent_message.add_reaction(PENCIL_EMOJI) await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, user = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await last_sent_message.remove_reaction(emoji, bot.user) # Prompt user for non-technical updates from the previous day non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc." 
await user.send(non_technical_msg_prompt) # Store the new wait_for message (non-technical update) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task non_technical_update_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the non-technical update raw_updates += f"\n\n{non_technical_update_raw.content}" # Summarize non-technical update with LLM non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content) # Prompt user for their goals for the day goals_msg_prompt = "What do you plan to work on or accomplish today?" await user.send(goals_msg_prompt) # Store the new wait_for message (goals for the day) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task goals_for_today_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the goals # Summarize goals for the day with LLM goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content) # Update the streak for this member streak = streaks_manager.get_streak(member.discord_id) streaks_manager.update_streak(member.discord_id, streak + 1) member.update_streak(streaks_manager.get_streak(member.discord_id)) member.increment_weekly_checkins() raw_updates += f"\n\n{goals_for_today_raw.content}" final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}" updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone) updates_manager.update_summarized_status(member.discord_id, final_updates) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) # Member name update as a header member_update_header = f"## {member.name}'s Update:" # Compile the final report with Markdown formatting final_report = ( f"\n### Technical Update:\n" f"{summarized_report}\n" f"### Non-Technical Update:\n" f"{non_technical_update}\n" f"### Goals for Today:\n" f"{goals_for_today}" ) stand_up_feedback = await updates_manager.evaluate_performance(final_report) # Concatenate the member name update with the final report and send to the designated Discord channel complete_message = f"{member_update_header}{final_report}" guild = bot.get_guild(GUILD_TOKEN) channel_to_post_in = guild.get_channel(CHANNEL_TOKEN) await user.send(stand_up_feedback) await send_long_message(channel_to_post_in, complete_message) async def send_long_message(destination, msg): max_length = 2000 # Discord's max character limit for a message sent_messages = [] # Keep track of all messages sent while len(msg) > 0: # If the message is shorter than the max length, send it as is if len(msg) <= max_length: sent_message = await destination.send(msg) sent_messages.append(sent_message) break # The message is sent, so break out of the loop # Find the nearest newline character before the max_length split_index = msg.rfind('\n', 0, max_length) # If no newline is found, just split at max_length if split_index == -1: split_index = max_length # Split the message at the found index and send the first part part_to_send = msg[:split_index].strip() sent_message = await destination.send(part_to_send) sent_messages.append(sent_message) # Wait a bit to respect Discord's rate limits await asyncio.sleep(1) # Remove the part that was sent 
from the message msg = msg[split_index:].strip() # Return the last message sent for reaction addition return sent_messages[-1] if sent_messages else None @bot.command(name='viewscheduledjobs') async def view_scheduled_jobs(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view scheduled jobs.") return # Get all scheduled jobs using the Scheduler's method scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager) # Send the scheduled jobs to the admin user for job in scheduled_jobs: await ctx.send(job) @bot.command(name='statusrequest') async def status_request(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to request status.") return # Find the member object using the Discord ID member_to_request = team_member_manager.find_member(discord_id) if member_to_request: for member in team_member_manager.team_members: scheduler.remove_job(member.discord_id) scheduler.unschedule_weekly_post() # Send the status request to the member await ctx.send(f"Status request sent to user with Discord ID {discord_id}.") for member in team_member_manager.team_members: scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager) scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager) await ctx.send(f"Status request received from user with Discord ID {discord_id}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='adduser') async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to add users.") return # Add the new member using team_member_manager team_member_manager.add_member(discord_id, name, time_zone, github_username) # Update the weekly post to include the new member new_member = team_member_manager.find_member(discord_id) if new_member: await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User {name} added successfully.") @bot.command(name='removeuser') async def remove_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to remove users.") return # Find the member object member_to_remove = team_member_manager.find_member(discord_id) if member_to_remove: # Remove the member from the database team_member_manager.remove_member(discord_id) # Update the weekly post to remove the member await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.remove_job(discord_id) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User with Discord ID {discord_id} removed successfully.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='listusers') async 
def list_users(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to list users.") return # List users using team_member_manager users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members] user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users]) await ctx.send(f"List of users:\n{user_list}") @bot.command(name='updatetimezone') async def update_timezone(ctx, discord_id: int, new_time_zone: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update timezones.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the timezone in the database team_member_manager.update_member_timezone(discord_id, new_time_zone) scheduler.remove_job(discord_id) scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='updatestreak') async def update_streak(ctx, discord_id: int, new_streak: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized 
to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
next_line: team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
gold_snippet_index: 1
created_at: 2023-10-12 02:01:46+00:00
level: 24k
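A hedged aside on the row above (not part of the dataset): send_long_message splits a long message at the last newline before Discord's 2000-character limit, falling back to a hard split when no newline is found. A standalone sketch of just that chunking logic; split_for_discord is a hypothetical name for illustration, not a function in the repo:

    # Split a message into <=2000-char chunks, preferring newline breaks.
    def split_for_discord(msg: str, max_length: int = 2000) -> list[str]:
        parts = []
        while msg:
            if len(msg) <= max_length:
                parts.append(msg)
                break
            # Last newline before the limit, else a hard split at the limit.
            split_index = msg.rfind('\n', 0, max_length)
            if split_index == -1:
                split_index = max_length
            parts.append(msg[:split_index].strip())
            msg = msg[split_index:].strip()
        return parts

    assert all(len(p) <= 2000 for p in split_for_discord("line\n" * 1000))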
repo_name: azuline/rose
file_path: rose/cache_test.py
[ { "identifier": "TEST_COLLAGE_1", "path": "conftest.py", "snippet": "TEST_COLLAGE_1 = TESTDATA / \"Collage 1\"" }, { "identifier": "TEST_PLAYLIST_1", "path": "conftest.py", "snippet": "TEST_PLAYLIST_1 = TESTDATA / \"Playlist 1\"" }, { "identifier": "TEST_RELEASE_1", "path": "...
import dataclasses import hashlib import shutil import time import pytest import tomllib from pathlib import Path from conftest import TEST_COLLAGE_1, TEST_PLAYLIST_1, TEST_RELEASE_1, TEST_RELEASE_2, TEST_RELEASE_3 from rose.audiotags import AudioTags from rose.cache import ( CACHE_SCHEMA_PATH, STORED_DATA_FILE_REGEX, CachedCollage, CachedPlaylist, CachedRelease, CachedTrack, _unpack, artist_exists, connect, genre_exists, get_collage, get_playlist, get_release, get_release_logtext, get_track, get_track_logtext, get_tracks_associated_with_release, get_tracks_associated_with_releases, label_exists, list_artists, list_collages, list_genres, list_labels, list_playlists, list_releases, list_tracks, lock, maybe_invalidate_cache_database, update_cache, update_cache_evict_nonexistent_releases, update_cache_for_releases, ) from rose.common import VERSION, Artist, ArtistMapping from rose.config import Config
token_num: 18,339
assert af.release_id is not None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is not None assert af.release_id is not None def test_update_cache_releases_already_fully_cached(config: Config) -> None: """Test that a fully cached release No Ops when updated again.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_previously_cached(config: Config) -> None: """Test that a cached release is updated after a track updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # I'm too lazy to mutagen update the files, so instead we're going to update the database. And # then touch a file to signify that "we modified it." with connect(config) as conn: conn.execute("UPDATE releases SET title = 'An Uncool Album'") (release_dir / "01.m4a").touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_datafile(config: Config) -> None: """Test that a cached release is updated after a datafile updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: conn.execute("UPDATE releases SET datafile_mtime = '0' AND new = false") update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT new, added_at FROM releases") row = cursor.fetchone() assert row["new"] assert row["added_at"] def test_update_cache_releases_disk_upgrade_old_datafile(config: Config) -> None: """Test that a legacy invalid datafile is upgraded on index.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) datafile = release_dir / ".rose.lalala.toml" datafile.touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. 
with connect(config) as conn: cursor = conn.execute("SELECT id, new, added_at FROM releases") row = cursor.fetchone() assert row["id"] == "lalala" assert row["new"] assert row["added_at"] with datafile.open("r") as fp: data = fp.read() assert "new = true" in data assert "added_at = " in data def test_update_cache_releases_source_path_renamed(config: Config) -> None: """Test that a cached release is updated after a directory rename.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) moved_release_dir = config.music_source_dir / "moved lol" release_dir.rename(moved_release_dir) update_cache_for_releases(config, [moved_release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(moved_release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_delete_nonexistent(config: Config) -> None: """Test that deleted releases that are no longer on disk are cleared from cache.""" with connect(config) as conn: conn.execute( """ INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash) VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0') """ )
def test_schema(config: Config) -> None: """Test that the schema successfully bootstraps.""" with CACHE_SCHEMA_PATH.open("rb") as fp: schema_hash = hashlib.sha256(fp.read()).hexdigest() maybe_invalidate_cache_database(config) with connect(config) as conn: cursor = conn.execute("SELECT schema_hash, config_hash, version FROM _schema_hash") row = cursor.fetchone() assert row["schema_hash"] == schema_hash assert row["config_hash"] is not None assert row["version"] == VERSION def test_migration(config: Config) -> None: """Test that "migrating" the database correctly migrates it.""" config.cache_database_path.unlink() with connect(config) as conn: conn.execute( """ CREATE TABLE _schema_hash ( schema_hash TEXT , config_hash TEXT , version TEXT , PRIMARY KEY (schema_hash, config_hash, version) ) """ ) conn.execute( """ INSERT INTO _schema_hash (schema_hash, config_hash, version) VALUES ('haha', 'lala', 'blabla') """, ) with CACHE_SCHEMA_PATH.open("rb") as fp: latest_schema_hash = hashlib.sha256(fp.read()).hexdigest() maybe_invalidate_cache_database(config) with connect(config) as conn: cursor = conn.execute("SELECT schema_hash, config_hash, version FROM _schema_hash") row = cursor.fetchone() assert row["schema_hash"] == latest_schema_hash assert row["config_hash"] is not None assert row["version"] == VERSION cursor = conn.execute("SELECT COUNT(*) FROM _schema_hash") assert cursor.fetchone()[0] == 1 def test_locks(config: Config) -> None: """Test that taking locks works. The times are a bit loose b/c GH Actions is slow.""" lock_name = "lol" # Test that the locking and timeout work. start = time.time() with lock(config, lock_name, timeout=0.2): lock1_acq = time.time() with lock(config, lock_name, timeout=0.2): lock2_acq = time.time() # Assert that we had to wait ~0.1sec to get the second lock. assert lock1_acq - start < 0.08 assert lock2_acq - lock1_acq > 0.17 # Test that releasing a lock actually works. start = time.time() with lock(config, lock_name, timeout=0.2): lock1_acq = time.time() with lock(config, lock_name, timeout=0.2): lock2_acq = time.time() # Assert that we had to wait negligible time to get the second lock. assert lock1_acq - start < 0.08 assert lock2_acq - lock1_acq < 0.08 def test_update_cache_all(config: Config) -> None: """Test that the update all function works.""" shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) # Test that we prune deleted releases too. 
with connect(config) as conn: conn.execute( """ INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash) VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0') """ ) update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 2 cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 4 def test_update_cache_multiprocessing(config: Config) -> None: """Test that the update all function works.""" shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache_for_releases(config, force_multiprocessing=True) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 2 cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 4 def test_update_cache_releases(config: Config) -> None: release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # Check that the release directory was given a UUID. release_id: str | None = None for f in release_dir.iterdir(): if m := STORED_DATA_FILE_REGEX.match(f.name): release_id = m[1] assert release_id is not None # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( """ SELECT id, source_path, title, releasetype, year, new FROM releases WHERE id = ? """, (release_id,), ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] cursor = conn.execute( "SELECT genre FROM releases_genres WHERE release_id = ?", (release_id,), ) genres = {r["genre"] for r in cursor.fetchall()} assert genres == {"K-Pop", "Pop"} cursor = conn.execute( "SELECT label FROM releases_labels WHERE release_id = ?", (release_id,), ) labels = {r["label"] for r in cursor.fetchall()} assert labels == {"A Cool Label"} cursor = conn.execute( "SELECT artist, role FROM releases_artists WHERE release_id = ?", (release_id,), ) artists = {(r["artist"], r["role"]) for r in cursor.fetchall()} assert artists == { ("BLACKPINK", "main"), } for f in release_dir.iterdir(): if f.suffix != ".m4a": continue # Assert that the track metadata was read correctly. cursor = conn.execute( """ SELECT id, source_path, title, release_id, tracknumber, discnumber, duration_seconds FROM tracks WHERE source_path = ? """, (str(f),), ) row = cursor.fetchone() track_id = row["id"] assert row["title"].startswith("Track") assert row["release_id"] == release_id assert row["tracknumber"] != "" assert row["discnumber"] == "1" assert row["duration_seconds"] == 2 cursor = conn.execute( "SELECT artist, role FROM tracks_artists WHERE track_id = ?", (track_id,), ) artists = {(r["artist"], r["role"]) for r in cursor.fetchall()} assert artists == { ("BLACKPINK", "main"), } def test_update_cache_releases_uncached_with_existing_id(config: Config) -> None: """Test that IDs in filenames are read and preserved.""" release_dir = config.music_source_dir / TEST_RELEASE_2.name shutil.copytree(TEST_RELEASE_2, release_dir) update_cache_for_releases(config, [release_dir]) # Check that the release directory was given a UUID. 
release_id: str | None = None for f in release_dir.iterdir(): if m := STORED_DATA_FILE_REGEX.match(f.name): release_id = m[1] assert release_id == "ilovecarly" # Hardcoded ID for testing. def test_update_cache_releases_preserves_track_ids_across_rebuilds(config: Config) -> None: """Test that track IDs are preserved across cache rebuilds.""" release_dir = config.music_source_dir / TEST_RELEASE_3.name shutil.copytree(TEST_RELEASE_3, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute("SELECT id FROM tracks") first_track_ids = {r["id"] for r in cursor} # Nuke the database. config.cache_database_path.unlink() maybe_invalidate_cache_database(config) # Repeat cache population. update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute("SELECT id FROM tracks") second_track_ids = {r["id"] for r in cursor} # Assert IDs are equivalent. assert first_track_ids == second_track_ids def test_update_cache_releases_writes_ids_to_tags(config: Config) -> None: """Test that track IDs and release IDs are written to files.""" release_dir = config.music_source_dir / TEST_RELEASE_3.name shutil.copytree(TEST_RELEASE_3, release_dir) af = AudioTags.from_file(release_dir / "01.m4a") assert af.id is None assert af.release_id is None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is None assert af.release_id is None update_cache_for_releases(config, [release_dir]) af = AudioTags.from_file(release_dir / "01.m4a") assert af.id is not None assert af.release_id is not None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is not None assert af.release_id is not None def test_update_cache_releases_already_fully_cached(config: Config) -> None: """Test that a fully cached release No Ops when updated again.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_previously_cached(config: Config) -> None: """Test that a cached release is updated after a track updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # I'm too lazy to mutagen update the files, so instead we're going to update the database. And # then touch a file to signify that "we modified it." with connect(config) as conn: conn.execute("UPDATE releases SET title = 'An Uncool Album'") (release_dir / "01.m4a").touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. 
with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_datafile(config: Config) -> None: """Test that a cached release is updated after a datafile updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: conn.execute("UPDATE releases SET datafile_mtime = '0' AND new = false") update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT new, added_at FROM releases") row = cursor.fetchone() assert row["new"] assert row["added_at"] def test_update_cache_releases_disk_upgrade_old_datafile(config: Config) -> None: """Test that a legacy invalid datafile is upgraded on index.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) datafile = release_dir / ".rose.lalala.toml" datafile.touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT id, new, added_at FROM releases") row = cursor.fetchone() assert row["id"] == "lalala" assert row["new"] assert row["added_at"] with datafile.open("r") as fp: data = fp.read() assert "new = true" in data assert "added_at = " in data def test_update_cache_releases_source_path_renamed(config: Config) -> None: """Test that a cached release is updated after a directory rename.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) moved_release_dir = config.music_source_dir / "moved lol" release_dir.rename(moved_release_dir) update_cache_for_releases(config, [moved_release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(moved_release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_delete_nonexistent(config: Config) -> None: """Test that deleted releases that are no longer on disk are cleared from cache.""" with connect(config) as conn: conn.execute( """ INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash) VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0') """ )
update_cache_evict_nonexistent_releases(config)
35
2023-10-09 14:42:23+00:00
24k
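Each record in this dump pairs a flattened code prefix with the single held-out line that follows it, so a completion model can be scored one record at a time. The sketch below is a minimal illustration of that scoring loop under stated assumptions; it is not part of any repository shown here. The dict keys (`prefix`, `gold_next_line`) and the `complete_one_line` callable are hypothetical stand-ins for however the fields are actually loaded.

# Minimal sketch of exact-match next-line scoring for one record.
# All names here are hypothetical: `prefix` stands for the cropped code
# leading up to the cut point, `gold_next_line` for the held-out line,
# and `complete_one_line` for any code-completion model under test.
from typing import Callable


def score_record(record: dict, complete_one_line: Callable[[str], str]) -> bool:
    prediction = complete_one_line(record["prefix"])
    # Compare ignoring surrounding whitespace, since this dump flattens newlines.
    return prediction.strip() == record["gold_next_line"].strip()


if __name__ == "__main__":
    dummy = {"prefix": "def f():\n", "gold_next_line": "    return 0"}
    print(score_record(dummy, lambda src: "return 0"))  # -> True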
zhaoyizhou1123/mbrcsl
examples/pointmaze/run_combo_maze.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropou...
import argparse
import random
import datetime
import numpy as np
import torch

from offlinerlkit.nets import MLP
from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian, EnsembleDynamicsModel
from offlinerlkit.dynamics import EnsembleDynamics
from offlinerlkit.utils.scaler import StandardScaler
from offlinerlkit.utils.termination_fns import termination_fn_default
from offlinerlkit.buffer import ReplayBuffer
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import MBPolicyTrainer
from offlinerlkit.policy import COMBOPolicy
from offlinerlkit.utils.none_or_str import none_or_str
from envs.pointmaze.create_maze_dataset import create_env_dataset
from envs.pointmaze.utils.trajectory import get_pointmaze_dataset
from envs.pointmaze.utils.maze_utils import PointMazeObsWrapper
14,991
env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device) critic1 = Critic(critic1_backbone, args.device) critic2 = Critic(critic2_backbone, args.device) actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr) critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr) critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(actor_optim, args.epoch) if args.auto_alpha: target_entropy = args.target_entropy if args.target_entropy \ else -np.prod(env.action_space.shape) args.target_entropy = target_entropy log_alpha = torch.zeros(1, requires_grad=True, device=args.device) alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr) alpha = (target_entropy, log_alpha, alpha_optim) else: alpha = args.alpha # create dynamics load_dynamics_model = True if args.load_dynamics_path else False dynamics_model = EnsembleDynamicsModel( obs_dim=np.prod(args.obs_shape), action_dim=args.action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr ) scaler = StandardScaler() termination_fn = termination_fn_default dynamics = EnsembleDynamics( dynamics_model, dynamics_optim, scaler, termination_fn ) if args.load_dynamics_path: print(f"Load dynamics from {args.load_dynamics_path}") dynamics.load(args.load_dynamics_path) # create policy policy = COMBOPolicy( dynamics, actor, critic1, critic2, actor_optim, critic1_optim, critic2_optim, action_space=env.action_space, tau=args.tau, gamma=args.gamma, alpha=alpha, cql_weight=args.cql_weight, temperature=args.temperature, max_q_backup=args.max_q_backup, deterministic_backup=args.deterministic_backup, with_lagrange=args.with_lagrange, lagrange_threshold=args.lagrange_threshold, cql_alpha_lr=args.cql_alpha_lr, num_repeart_actions=args.num_repeat_actions, uniform_rollout=args.uniform_rollout, rho_s=args.rho_s ) # create buffer real_buffer = ReplayBuffer( buffer_size=len(dataset["observations"]), obs_shape=args.obs_shape, obs_dtype=np.float32, action_dim=args.action_dim, action_dtype=np.float32, device=args.device ) real_buffer.load_dataset(dataset) fake_buffer = ReplayBuffer( buffer_size=args.rollout_batch_size*args.rollout_length*args.model_retain_epochs, obs_shape=args.obs_shape, obs_dtype=np.float32, action_dim=args.action_dim, action_dtype=np.float32, device=args.device ) # log timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S") exp_name = f"timestamp_{timestamp}&{args.seed}" log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args)) # key: output file name, value: output handler type output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "dynamics_training_progress": "csv", "tb": "tensorboard" } logger = Logger(log_dirs, output_config) logger.log_hyperparameters(vars(args)) # create policy trainer
def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo_name", type=str, default="combo") parser.add_argument("--task", type=str, default="pointmaze") # Self-constructed environment parser.add_argument("--last_eval", action="store_true") # env config (general) parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=200, help="max path length for pickplace") # env config (pointmaze) parser.add_argument('--maze_config_file', type=str, default='envs/pointmaze/config/maze_default.json') parser.add_argument('--data_file', type=str, default='pointmaze.dat') parser.add_argument("--seed", type=int, default=0) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=1.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--uniform-rollout", type=bool, default=False) parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"]) parser.add_argument("--dynamics-lr", type=float, default=1e-3) parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n-ensemble", type=int, default=7) parser.add_argument("--n-elites", type=int, default=5) parser.add_argument("--rollout-freq", type=int, default=1000) parser.add_argument("--rollout-batch-size", type=int, default=50000) parser.add_argument("--rollout-length", type=int, default=5) parser.add_argument("--model-retain-epochs", type=int, default=5) parser.add_argument("--real-ratio", type=float, default=0.5) parser.add_argument("--load-dynamics-path", type=none_or_str, default=None) parser.add_argument("--epoch", type=int, default=100) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=10) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pointmaze': env, trajs = create_env_dataset(args) env = PointMazeObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = 
np.prod(args.action_shape) dataset, _, _ = get_pointmaze_dataset(trajs) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device) critic1 = Critic(critic1_backbone, args.device) critic2 = Critic(critic2_backbone, args.device) actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr) critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr) critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(actor_optim, args.epoch) if args.auto_alpha: target_entropy = args.target_entropy if args.target_entropy \ else -np.prod(env.action_space.shape) args.target_entropy = target_entropy log_alpha = torch.zeros(1, requires_grad=True, device=args.device) alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr) alpha = (target_entropy, log_alpha, alpha_optim) else: alpha = args.alpha # create dynamics load_dynamics_model = True if args.load_dynamics_path else False dynamics_model = EnsembleDynamicsModel( obs_dim=np.prod(args.obs_shape), action_dim=args.action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr ) scaler = StandardScaler() termination_fn = termination_fn_default dynamics = EnsembleDynamics( dynamics_model, dynamics_optim, scaler, termination_fn ) if args.load_dynamics_path: print(f"Load dynamics from {args.load_dynamics_path}") dynamics.load(args.load_dynamics_path) # create policy policy = COMBOPolicy( dynamics, actor, critic1, critic2, actor_optim, critic1_optim, critic2_optim, action_space=env.action_space, tau=args.tau, gamma=args.gamma, alpha=alpha, cql_weight=args.cql_weight, temperature=args.temperature, max_q_backup=args.max_q_backup, deterministic_backup=args.deterministic_backup, with_lagrange=args.with_lagrange, lagrange_threshold=args.lagrange_threshold, cql_alpha_lr=args.cql_alpha_lr, num_repeart_actions=args.num_repeat_actions, uniform_rollout=args.uniform_rollout, rho_s=args.rho_s ) # create buffer real_buffer = ReplayBuffer( buffer_size=len(dataset["observations"]), obs_shape=args.obs_shape, obs_dtype=np.float32, action_dim=args.action_dim, action_dtype=np.float32, device=args.device ) real_buffer.load_dataset(dataset) fake_buffer = ReplayBuffer( buffer_size=args.rollout_batch_size*args.rollout_length*args.model_retain_epochs, obs_shape=args.obs_shape, obs_dtype=np.float32, action_dim=args.action_dim, action_dtype=np.float32, device=args.device ) # log timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S") exp_name = f"timestamp_{timestamp}&{args.seed}" log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args)) # key: output file name, value: output handler type output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "dynamics_training_progress": "csv", "tb": "tensorboard" } logger = Logger(log_dirs, output_config) 
logger.log_hyperparameters(vars(args)) # create policy trainer
policy_trainer = MBPolicyTrainer(
11
2023-10-11 08:36:06+00:00
24k
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sa...
import argparse
import os
import psutil
import yaml
import copy
import random
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import torch
import hydra
import wandb
import torchvision
import json
import sys
import regex as re
import open_clip

from contextlib import nullcontext
from torch import autocast
from omegaconf import OmegaConf, open_dict
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torchvision import transforms, datasets
from torchvision.utils import save_image
from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples
from sampling_helpers import load_model_hf
from ldm import *
from ldm.models.diffusion.cc_ddim import CCMDDIMSampler
from data.imagenet_classnames import name_map, openai_imagenet_classes
from utils.DecisionDensenetModel import DecisionDensenetModel
from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop
from utils.vision_language_wrapper import VisionLanguageWrapper
from utils.madry_net import MadryNet
from utils.dino_linear import LinearClassifier, DINOLinear
14,962
elif "Flowers102" in cfg.data._target_: with open("data/flowers_idx_to_label.json", "r") as f: flowers_idx_to_classname = json.load(f) flowers_idx_to_classname = {int(k)-1: v for k, v in flowers_idx_to_classname.items()} i2h = flowers_idx_to_classname elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) i2h = {int(k): v for k, v in pets_idx_to_classname.items()} else: raise NotImplementedError if "ImageNet" in cfg.data._target_: with open('data/synset_closest_idx.yaml', 'r') as file: synset_closest_idx = yaml.safe_load(file) elif "Flowers102" in cfg.data._target_: with open("data/flowers_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} if not cfg.resume: torch.save({"last_data_idx": -1}, checkpoint_path) seed = cfg.seed if "seed" in cfg else 0 set_seed(seed=seed) for i, batch in enumerate(data_loader): if "fixed_seed" in cfg: set_seed(seed=cfg.get("seed", 0)) if cfg.fixed_seed else None seed = seed if cfg.fixed_seed else -1 if "return_tgt_cls" in cfg.data and cfg.data.return_tgt_cls: image, label, tgt_classes, unique_data_idx = batch tgt_classes = tgt_classes.to(device) #squeeze() else: image, label, unique_data_idx = batch if "ImageNet" in cfg.data._target_: tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) elif "CelebAHQDataset" in cfg.data._target_: tgt_classes = (1 - label).type(torch.float32) elif "Flowers102" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_: tgt_classes = torch.tensor([closest_indices[unique_data_idx[l].item()*cfg.data.num_shards + cfg.data.shard][0] for l in range(label.shape[0])]).to(device) else: raise NotImplementedError image = image.to(device) #squeeze() label = label.to(device) #.item() #squeeze() #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #tgt_classes = synset_closest_idx[label] #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #shuffle tgt_classes #random.shuffle(tgt_classes) #get classifcation prediction with torch.inference_mode(): #with precision_scope(): if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: logits = classifier_model(image) else: logits = sampler.get_classifier_logits(_unmap_img(image)) #converting to -1, 1 # TODO: handle binary vs multi-class if "ImageNet" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_ or "Flowers102" in cfg.data._target_: # multi-class in_class_pred = logits.argmax(dim=1) in_confid = logits.softmax(dim=1).max(dim=1).values in_confid_tgt = logits.softmax(dim=1)[torch.arange(batch_size), tgt_classes] else: # binary in_class_pred = (logits >= 0).type(torch.int8) in_confid = torch.where(logits >= 0, logits.sigmoid(), 1 - logits.sigmoid()) in_confid_tgt = torch.where(tgt_classes.to(device) == 0, 1 - logits.sigmoid(), logits.sigmoid()) print("in class_pred: ", in_class_pred, in_confid) for j, l in enumerate(label): print(f"converting {i} from : {i2h[l.item()]} to: {i2h[int(tgt_classes[j].item())]}") init_image = image.clone() #image.repeat(n_samples_per_class, 1, 1, 1).to(device) sampler.init_images = init_image.to(device) sampler.init_labels = label # 
n_samples_per_class * [label] if isinstance(cfg.sampler.lp_custom, str) and "dino_" in cfg.sampler.lp_custom: if device != next(sampler.distance_criterion.dino.parameters()).device: sampler.distance_criterion.dino = sampler.distance_criterion.dino.to(device) sampler.dino_init_features = sampler.get_dino_features(sampler.init_images, device=device).clone() #mapped_image = _unmap_img(init_image) init_latent = model.get_first_stage_encoding( model.encode_first_stage(_unmap_img(init_image))) # move to latent space if "txt" == model.cond_stage_key: # text-conditional if "ImageNet" in cfg.data._target_: prompts = [f"a photo of a {openai_imagenet_classes[idx.item()]}." for idx in tgt_classes] elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] prompts = [] for target in tgt_classes: if cfg.data.query_label == 31 and target == 0: attr = "non-smiling" elif cfg.data.query_label == 31 and target == 1: attr = "smiling" elif cfg.data.query_label == 39 and target == 0: attr = "old" elif cfg.data.query_label == 39 and target == 1: attr = "young" else: raise NotImplementedError prompts.append(f"a photo of a {attr} person") elif "OxfordIIIPets" in cfg.data._target_: # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md prompts = [f"a photo of a {i2h[idx.item()]}, a type of pet." for idx in tgt_classes] elif "Flowers102" in cfg.data._target_: # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md prompts = [f"a photo of a {i2h[idx.item()]}, a type of flower." for idx in tgt_classes] else: raise NotImplementedError else: prompts = None
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device) classifier_model = DINOLinear(dino, linear_classifier) transforms_list = [transforms.CenterCrop(224), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))] classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) elif "OxfordIIIPets" in cfg.data._target_: # zero-shot OpenClip: https://arxiv.org/pdf/2212.07143.pdf model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') model = model.to(device).eval() tokenizer = open_clip.get_tokenizer('ViT-B-32') # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) prompts = [f"a photo of a {label}, a type of pet." 
for label in pets_idx_to_classname.values()] classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! 
starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval() classifier_model.train = disabled_train ddim_steps = cfg.ddim_steps ddim_eta = cfg.ddim_eta scale = cfg.scale #for unconditional guidance strength = cfg.strength #for unconditional guidance sampler = CCMDDIMSampler(model, classifier_model, seg_model= None, classifier_wrapper="classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper, record_intermediate_results=cfg.record_intermediate_results, verbose=cfg.verbose, **cfg.sampler) sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False) assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]' t_enc = int(strength * len(sampler.ddim_timesteps)) assert len(sampler.ddim_timesteps) == ddim_steps, "ddim_steps should be equal to len(sampler.ddim_timesteps)" n_samples_per_class = cfg.n_samples_per_class batch_size = cfg.data.batch_size shuffle = cfg.get("shuffle", False) #save config to the output directory #check if the config file already exists else create a config file config_path = os.path.join(out_dir, "config.yaml") if os.path.exists(config_path): print("config file already exists! skipping ...") else: with open(os.path.join(out_dir, "config.yaml"), 'w') as f: print("saving config to ", os.path.join(out_dir, "config.yaml ...")) yaml.dump(config, f) os.chmod(os.path.join(out_dir, "config.yaml"), 0o555) #data_path = cfg.data_path dataset = get_dataset(cfg, last_data_idx=last_data_idx) print("dataset length: ", len(dataset)) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1) if "ImageNet" in cfg.data._target_: i2h = name_map elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] if 31 == cfg.data.query_label: i2h = ["no smile", "smile"] elif 39 == cfg.data.query_label: i2h = ["old", "young"] else: raise NotImplementedError elif "Flowers102" in cfg.data._target_: with open("data/flowers_idx_to_label.json", "r") as f: flowers_idx_to_classname = json.load(f) flowers_idx_to_classname = {int(k)-1: v for k, v in flowers_idx_to_classname.items()} i2h = flowers_idx_to_classname elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) i2h = {int(k): v for k, v in pets_idx_to_classname.items()} else: raise NotImplementedError if "ImageNet" in cfg.data._target_: with open('data/synset_closest_idx.yaml', 'r') as file: synset_closest_idx = yaml.safe_load(file) elif "Flowers102" in cfg.data._target_: with open("data/flowers_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_closest_indices.json") as file: closest_indices = 
json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} if not cfg.resume: torch.save({"last_data_idx": -1}, checkpoint_path) seed = cfg.seed if "seed" in cfg else 0 set_seed(seed=seed) for i, batch in enumerate(data_loader): if "fixed_seed" in cfg: set_seed(seed=cfg.get("seed", 0)) if cfg.fixed_seed else None seed = seed if cfg.fixed_seed else -1 if "return_tgt_cls" in cfg.data and cfg.data.return_tgt_cls: image, label, tgt_classes, unique_data_idx = batch tgt_classes = tgt_classes.to(device) #squeeze() else: image, label, unique_data_idx = batch if "ImageNet" in cfg.data._target_: tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) elif "CelebAHQDataset" in cfg.data._target_: tgt_classes = (1 - label).type(torch.float32) elif "Flowers102" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_: tgt_classes = torch.tensor([closest_indices[unique_data_idx[l].item()*cfg.data.num_shards + cfg.data.shard][0] for l in range(label.shape[0])]).to(device) else: raise NotImplementedError image = image.to(device) #squeeze() label = label.to(device) #.item() #squeeze() #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #tgt_classes = synset_closest_idx[label] #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #shuffle tgt_classes #random.shuffle(tgt_classes) #get classifcation prediction with torch.inference_mode(): #with precision_scope(): if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: logits = classifier_model(image) else: logits = sampler.get_classifier_logits(_unmap_img(image)) #converting to -1, 1 # TODO: handle binary vs multi-class if "ImageNet" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_ or "Flowers102" in cfg.data._target_: # multi-class in_class_pred = logits.argmax(dim=1) in_confid = logits.softmax(dim=1).max(dim=1).values in_confid_tgt = logits.softmax(dim=1)[torch.arange(batch_size), tgt_classes] else: # binary in_class_pred = (logits >= 0).type(torch.int8) in_confid = torch.where(logits >= 0, logits.sigmoid(), 1 - logits.sigmoid()) in_confid_tgt = torch.where(tgt_classes.to(device) == 0, 1 - logits.sigmoid(), logits.sigmoid()) print("in class_pred: ", in_class_pred, in_confid) for j, l in enumerate(label): print(f"converting {i} from : {i2h[l.item()]} to: {i2h[int(tgt_classes[j].item())]}") init_image = image.clone() #image.repeat(n_samples_per_class, 1, 1, 1).to(device) sampler.init_images = init_image.to(device) sampler.init_labels = label # n_samples_per_class * [label] if isinstance(cfg.sampler.lp_custom, str) and "dino_" in cfg.sampler.lp_custom: if device != next(sampler.distance_criterion.dino.parameters()).device: sampler.distance_criterion.dino = sampler.distance_criterion.dino.to(device) sampler.dino_init_features = sampler.get_dino_features(sampler.init_images, device=device).clone() #mapped_image = _unmap_img(init_image) init_latent = model.get_first_stage_encoding( model.encode_first_stage(_unmap_img(init_image))) # move to latent space if "txt" == model.cond_stage_key: # text-conditional if "ImageNet" in cfg.data._target_: prompts = [f"a photo of a {openai_imagenet_classes[idx.item()]}." 
for idx in tgt_classes] elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] prompts = [] for target in tgt_classes: if cfg.data.query_label == 31 and target == 0: attr = "non-smiling" elif cfg.data.query_label == 31 and target == 1: attr = "smiling" elif cfg.data.query_label == 39 and target == 0: attr = "old" elif cfg.data.query_label == 39 and target == 1: attr = "young" else: raise NotImplementedError prompts.append(f"a photo of a {attr} person") elif "OxfordIIIPets" in cfg.data._target_: # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md prompts = [f"a photo of a {i2h[idx.item()]}, a type of pet." for idx in tgt_classes] elif "Flowers102" in cfg.data._target_: # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md prompts = [f"a photo of a {i2h[idx.item()]}, a type of flower." for idx in tgt_classes] else: raise NotImplementedError else: prompts = None
out = generate_samples(
3
2023-10-10 09:40:10+00:00
24k
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = ...
import argparse
import os
import random
import sys
import shutil
import cv2
import numpy as np
import torch
import wandb

from importlib.machinery import SourceFileLoader
from tqdm import tqdm

from datasets.gradslam_datasets import (
    load_dataset_config,
    ICLDataset,
    ReplicaDataset,
    AzureKinectDataset,
    ScannetDataset,
    Ai2thorDataset,
    Record3DDataset,
    RealsenseDataset,
    TUMDataset,
    ScannetPPDataset,
    NeRFCaptureDataset
)
from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt
from utils.recon_helpers import setup_camera
from utils.gs_helpers import (
    params2rendervar, params2depthplussilhouette,
    transformed_params2depthplussilhouette,
    transform_to_frame, report_progress, eval,
    l1_loss_v1, matrix_to_quaternion
)
from utils.gs_external import (
    calc_ssim, build_rotation,
    densify, get_expon_lr_func, update_learning_rate
)
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
18,414
elif config_dict["dataset_name"].lower() in ["record3d"]: return Record3DDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["realsense"]: return RealsenseDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["tum"]: return TUMDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannetpp"]: return ScannetPPDataset(basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["nerfcapture"]: return NeRFCaptureDataset(basedir, sequence, **kwargs) else: raise ValueError(f"Unknown dataset name {config_dict['dataset_name']}") def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, mask=None, compute_mean_sq_dist=False, mean_sq_dist_method="projective"): width, height = color.shape[2], color.shape[1] CX = intrinsics[0][2] CY = intrinsics[1][2] FX = intrinsics[0][0] FY = intrinsics[1][1] # Compute indices of pixels x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), torch.arange(height).cuda().float(), indexing='xy') xx = (x_grid - CX)/FX yy = (y_grid - CY)/FY xx = xx.reshape(-1) yy = yy.reshape(-1) depth_z = depth[0].reshape(-1) # Initialize point cloud pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1) if transform_pts: pix_ones = torch.ones(height * width, 1).cuda().float() pts4 = torch.cat((pts_cam, pix_ones), dim=1) c2w = torch.inverse(w2c) pts = (c2w @ pts4.T).T[:, :3] else: pts = pts_cam # Compute mean squared distance for initializing the scale of the Gaussians if compute_mean_sq_dist: if mean_sq_dist_method == "projective": # Projective Geometry (this is fast, farther -> larger radius) scale_gaussian = depth_z / ((FX + FY)/2) mean3_sq_dist = scale_gaussian**2 else: raise ValueError(f"Unknown mean_sq_dist_method: {mean_sq_dist_method}") # Colorize point cloud cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C) point_cld = torch.cat((pts, cols), -1) # Select points based on mask if mask is not None: point_cld = point_cld[mask] if compute_mean_sq_dist: mean3_sq_dist = mean3_sq_dist[mask] if compute_mean_sq_dist: return point_cld, mean3_sq_dist else: return point_cld def initialize_params(init_pt_cld, num_frames, mean3_sq_dist): num_pts = init_pt_cld.shape[0] means3D = init_pt_cld[:, :3] # [num_gaussians, 3] unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3] logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device="cuda") params = { 'means3D': means3D, 'rgb_colors': init_pt_cld[:, 3:6], 'unnorm_rotations': unnorm_rots, 'logit_opacities': logit_opacities, 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], (1, 1)), } # Initialize a single gaussian trajectory to model the camera poses relative to the first frame cam_rots = np.tile([1, 0, 0, 0], (1, 1)) cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames)) params['cam_unnorm_rots'] = cam_rots params['cam_trans'] = np.zeros((1, 3, num_frames)) for k, v in params.items(): # Check if value is already a torch tensor if not isinstance(v, torch.Tensor): params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) else: params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True)) variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float()} 
return params, variables def initialize_optimizer(params, lrs_dict): lrs = lrs_dict param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()] return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15) def initialize_first_timestep_from_ckpt(ckpt_path,dataset, num_frames, lrs_dict, mean_sq_dist_method): # Get RGB-D Data & Camera Parameters color, depth, intrinsics, pose = dataset[0] # Process RGB-D Data color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W) depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W) # Process Camera Parameters intrinsics = intrinsics[:3, :3] w2c = torch.linalg.inv(pose) # Setup Camera
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) print("System Paths:") for p in sys.path: print(p) def get_dataset(config_dict, basedir, sequence, **kwargs): if config_dict["dataset_name"].lower() in ["icl"]: return ICLDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["replica"]: return ReplicaDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]: return AzureKinectDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannet"]: return ScannetDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["ai2thor"]: return Ai2thorDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["record3d"]: return Record3DDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["realsense"]: return RealsenseDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["tum"]: return TUMDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannetpp"]: return ScannetPPDataset(basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["nerfcapture"]: return NeRFCaptureDataset(basedir, sequence, **kwargs) else: raise ValueError(f"Unknown dataset name {config_dict['dataset_name']}") def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, mask=None, compute_mean_sq_dist=False, mean_sq_dist_method="projective"): width, height = color.shape[2], color.shape[1] CX = intrinsics[0][2] CY = intrinsics[1][2] FX = intrinsics[0][0] FY = intrinsics[1][1] # Compute indices of pixels x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), torch.arange(height).cuda().float(), indexing='xy') xx = (x_grid - CX)/FX yy = (y_grid - CY)/FY xx = xx.reshape(-1) yy = yy.reshape(-1) depth_z = depth[0].reshape(-1) # Initialize point cloud pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1) if transform_pts: pix_ones = torch.ones(height * width, 1).cuda().float() pts4 = torch.cat((pts_cam, pix_ones), dim=1) c2w = torch.inverse(w2c) pts = (c2w @ pts4.T).T[:, :3] else: pts = pts_cam # Compute mean squared distance for initializing the scale of the Gaussians if compute_mean_sq_dist: if mean_sq_dist_method == "projective": # Projective Geometry (this is fast, farther -> larger radius) scale_gaussian = depth_z / ((FX + FY)/2) mean3_sq_dist = scale_gaussian**2 else: raise ValueError(f"Unknown mean_sq_dist_method: {mean_sq_dist_method}") # Colorize point cloud cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C) point_cld = torch.cat((pts, cols), -1) # Select points based on mask if mask is not None: point_cld = point_cld[mask] if compute_mean_sq_dist: mean3_sq_dist = mean3_sq_dist[mask] if compute_mean_sq_dist: return point_cld, mean3_sq_dist else: return point_cld def initialize_params(init_pt_cld, num_frames, mean3_sq_dist): num_pts = init_pt_cld.shape[0] means3D = init_pt_cld[:, :3] # [num_gaussians, 3] unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3] logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device="cuda") params = { 'means3D': means3D, 'rgb_colors': init_pt_cld[:, 3:6], 'unnorm_rotations': unnorm_rots, 'logit_opacities': logit_opacities, 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], 
(1, 1)), } # Initialize a single gaussian trajectory to model the camera poses relative to the first frame cam_rots = np.tile([1, 0, 0, 0], (1, 1)) cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames)) params['cam_unnorm_rots'] = cam_rots params['cam_trans'] = np.zeros((1, 3, num_frames)) for k, v in params.items(): # Check if value is already a torch tensor if not isinstance(v, torch.Tensor): params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) else: params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True)) variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float()} return params, variables def initialize_optimizer(params, lrs_dict): lrs = lrs_dict param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()] return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15) def initialize_first_timestep_from_ckpt(ckpt_path,dataset, num_frames, lrs_dict, mean_sq_dist_method): # Get RGB-D Data & Camera Parameters color, depth, intrinsics, pose = dataset[0] # Process RGB-D Data color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W) depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W) # Process Camera Parameters intrinsics = intrinsics[:3, :3] w2c = torch.linalg.inv(pose) # Setup Camera
cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), w2c.detach().cpu().numpy())
16
2023-11-30 20:26:47+00:00
24k
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forwa...
import os
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import numpy as np
import wandb
import uuid
import torch.distributed as dist
import copy
import torch.optim as optim
import matplotlib.pyplot as plt

from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss
from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss
from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss
from zoedepth.utils.config import DATASETS_CONFIG
from zoedepth.utils.misc import compute_metrics
from zoedepth.data.preprocess import get_black_border
from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
from datetime import datetime as dt
from zoedepth.utils.misc import generatemask
15,603
# For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8) black_border_params = get_black_border(x_np) top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right x_np_cropped = x_np[top:bottom, left:right, :] x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped)) # run inference on the cropped image pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device)) # resize the prediction to x_np_cropped's size pred_depths_cropped = nn.functional.interpolate( pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False) # pad the prediction back to the original size pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype) pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped return pred_depths def validate_on_batch(self, batch, val_step): images = batch['image'].to(self.device) depths_gt = batch['depth'].to(self.device) dataset = batch['dataset'][0] image_raw = batch['image_raw'].to(self.device) mask = batch["mask"].to(self.device) disp_gt_edges = batch['disp_gt_edges'].squeeze().numpy() bboxs = batch.get("bbox", None) if bboxs is not None: bboxs = bboxs.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) if 'has_valid_depth' in batch: if not batch['has_valid_depth']: return None, None depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0) mask = mask.squeeze().unsqueeze(0).unsqueeze(0) # if dataset == 'nyu': # pred_depths = self.crop_aware_infer(images, image_raw) # else: # pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0) # print(pred_depths.shape) # torch.Size([1, 1, 2160, 3840]) # print(depths_gt.shape) # torch.Size([1, 1, 2160, 3840]) with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: l_depth = self.silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool)) else: l_depth = self.dense_silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True) metrics = compute_metrics(depths_gt, pred_depths, disp_gt_edges=disp_gt_edges, **self.config) losses = {f"{self.silog_loss.name}": l_depth.item()} if self.should_log and self.config.get("debug", False): print(metrics) if val_step in [21, 27] and self.should_log: if self.config.get("debug", False): pass else: if self.sec_stage: log_rgb = image_raw else: log_rgb = images scale_pred = nn.functional.interpolate( pred_depths[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] self.log_images(rgb={"Input": log_rgb[0]}, depth={"GT": depths_gt[0], "PredictedMono": scale_pred}, prefix="Test", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return metrics, losses def train(self): print(f"Training {self.config.name}") if self.config.uid is None: self.config.uid = 
str(uuid.uuid4()).split('-')[-1] run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" self.config.run_id = run_id self.config.experiment_id = f"{self.config.wandb_start}_{self.config.name}{self.config.version_name}_{run_id}" self.should_write = ((not self.config.distributed) or self.config.rank == 0) self.should_log = self.should_write # and logging if self.should_log: if self.config.get("debug", False): pass else: tags = self.config.tags.split( ',') if self.config.tags != '' else None wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root, tags=tags, notes=self.config.notes, settings=wandb.Settings(start_method="fork")) self.model.train() self.step = 0 best_loss = np.inf validate_every = int(self.config.validate_every * self.iters_per_epoch) if self.config.prefetch: for i, batch in tqdm(enumerate(self.train_loader), desc=f"Prefetching...",
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else: self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth) self.scale_target = config.get("scale_target", None) self.consistency_training = config.get("consistency_training", False) if self.consistency_training: self.consistency_target = config.get("consistency_target", None) self.consistency_loss = ConsistencyLoss(self.consistency_target, config.get("focus_flatten", False), config.get("w_p", 1.0)) print("current weight for consistency loss is {}. focus_flatten is {}. 
w_p is {}".format(self.config.w_consistency, config.get("focus_flatten", False), config.get("w_p", 1.0))) def train_on_batch(self, batch, train_step, step_rate): """ Expects a batch of images and depth as input batch["image"].shape : batch_size, c, h, w batch["depth"].shape : batch_size, 1, h, w """ images, depths_gt = batch['image'].to(self.device), batch['depth'].to(self.device) image_raw = batch.get("image_raw", None) if image_raw is not None: image_raw = image_raw.to(self.device) sample_points = None if self.sampled_training: sample_points = batch['sample_points'].to(self.device) bbox = batch.get("bbox", None) if bbox is not None: bbox = bbox.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) depth_raw = batch.get("depth_raw", None) if depth_raw is not None: depth_raw = depth_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) shift = batch.get("shift", None) if shift is not None: shift = shift.to(self.device) dataset = batch['dataset'][0] b, c, h, w = images.size() mask = batch["mask"].to(self.device).to(torch.bool) sample_mask = batch.get("sample_mask", None) if sample_mask is not None: sample_mask = sample_mask.to(self.device).to(torch.bool) mask_raw = batch.get("mask_raw", None) if mask_raw is not None: mask_raw = mask_raw.to(self.device).to(torch.bool) losses = {} with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: output = self.model(images, sample_points, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) else: output = self.model(images, None, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) if self.boostingdepth: if self.lazy_epoch < self.epoch: output.update_learning_rate() self.lazy_epoch = self.epoch input_dict = dict() input_dict['data_gtfake'] = depths_gt output.set_input_train_gt(input_dict) output.optimize_parameters() pred_depths = output.fake_B pred = output.fake_B # print(torch.min(pred), torch.max(pred)) losses = output.get_current_losses() else: pred_depths = output['metric_depth'] if self.sampled_training: sampled_depth_gt = sample_points[:, :, -1].float().unsqueeze(dim=-1) sampled_depth_gt = sampled_depth_gt.permute(0, 2, 1) if self.config.get("representation", "") == 'biLaplacian': # only for sampled training for now l_dist, l_si = self.distribution_loss(output, sampled_depth_gt, mask=sample_mask) loss = self.config.w_dist * l_dist + self.config.w_si * l_si losses['distribution_loss'] = l_dist losses['sigloss'] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.sampled_training: l_si = self.silog_loss( pred_depths, sampled_depth_gt, mask=sample_mask) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, 
interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.multi_consistency: #### here here here pred_depths, coarse, fine = output['metric_depth'], output['coarse_depth_pred'], output['fine_depth_pred'] if self.consistency_training: depths_gt = torch.split(depths_gt, 1, dim=1) depths_gt = torch.cat(depths_gt, dim=0) mask = torch.split(mask, 1, dim=-1) mask = torch.cat(mask, dim=0).permute(0, 3, 1, 2) mask_raw = torch.cat([mask_raw, mask_raw], dim=0) depth_raw = torch.cat([depth_raw, depth_raw], dim=0) temp_features = output.get('temp_features', None) l_si_1, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_f, pred_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses[self.silog_loss.name] = l_si_1 losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c # loss = l_si_1 + l_si_f + l_si_c loss = l_si_1 if self.consistency_training: try: # depths_gt? pred_f? l_consistency = self.consistency_loss(pred, shift, mask, temp_features, pred_f=depths_gt) # use the resized pred except RuntimeError as e: print(e) print("some runtime error here! Hack with 0") l_consistency = torch.Tensor([0]).squeeze() losses[self.consistency_loss.name] = l_consistency loss += l_consistency * self.config.w_consistency else: l_si, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.dynamic: if step_rate > self.dynamic_unupdate_rate: warm_up_rate = min(1.0, (step_rate - self.dynamic_unupdate_rate) / 0.02) flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=warm_up_rate) loss += self.config.w_flop * flop_cost losses['flop_loss'] = flop_cost else: flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=1) loss += 0 * flop_cost losses['flop_loss'] = flop_cost if self.use_scale_loss: if self.scale_target == 'coarse': h_loss = self.scale_loss(pred_depths, output['coarse_depth_pred_roi'], mask, interpolate=True) else: h_loss = self.scale_loss(pred_depths, depths_gt, mask, interpolate=True) loss += self.config.w_scale * h_loss losses['scale_loss'] = h_loss # self.scaler.scale(loss).backward() # if self.config.clip_grad > 0: # self.scaler.unscale_(self.optimizer) # nn.utils.clip_grad_norm_( # self.model.parameters(), self.config.clip_grad) # self.scaler.step(self.optimizer) # self.scaler.update() # self.optimizer.zero_grad() self.scaler.scale(loss).backward() if self.config.clip_grad > 0: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_( self.model.parameters(), self.config.clip_grad) self.scaler.step(self.optimizer) self.scaler.update() self.optimizer.zero_grad() if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0: if self.config.get("debug", False): pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] plt.imshow(pred.squeeze().detach().cpu().numpy()) plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) 
images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, 
mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8) black_border_params = get_black_border(x_np) top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right x_np_cropped = x_np[top:bottom, left:right, :] x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped)) # run inference on the cropped image pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device)) # resize the prediction to x_np_cropped's size pred_depths_cropped = nn.functional.interpolate( pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False) # pad the prediction back to the original size pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype) pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped return pred_depths def validate_on_batch(self, batch, val_step): images = batch['image'].to(self.device) depths_gt = batch['depth'].to(self.device) dataset = batch['dataset'][0] image_raw = batch['image_raw'].to(self.device) mask = batch["mask"].to(self.device) disp_gt_edges = batch['disp_gt_edges'].squeeze().numpy() bboxs = batch.get("bbox", None) if bboxs is not None: bboxs = bboxs.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) if 'has_valid_depth' in batch: if not batch['has_valid_depth']: return None, None depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0) mask = mask.squeeze().unsqueeze(0).unsqueeze(0) # if dataset == 'nyu': # pred_depths = self.crop_aware_infer(images, image_raw) # else: # pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0) # print(pred_depths.shape) # torch.Size([1, 1, 2160, 3840]) # print(depths_gt.shape) # torch.Size([1, 1, 2160, 3840]) with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: l_depth = self.silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool)) else: l_depth = self.dense_silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True) metrics = compute_metrics(depths_gt, pred_depths, disp_gt_edges=disp_gt_edges, **self.config) losses = {f"{self.silog_loss.name}": l_depth.item()} if self.should_log and self.config.get("debug", False): print(metrics) if val_step in [21, 27] and self.should_log: if self.config.get("debug", False): pass else: if self.sec_stage: log_rgb = image_raw else: log_rgb = images scale_pred = nn.functional.interpolate( pred_depths[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = 
DATASETS_CONFIG[dataset]['max_depth'] self.log_images(rgb={"Input": log_rgb[0]}, depth={"GT": depths_gt[0], "PredictedMono": scale_pred}, prefix="Test", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return metrics, losses def train(self): print(f"Training {self.config.name}") if self.config.uid is None: self.config.uid = str(uuid.uuid4()).split('-')[-1] run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" self.config.run_id = run_id self.config.experiment_id = f"{self.config.wandb_start}_{self.config.name}{self.config.version_name}_{run_id}" self.should_write = ((not self.config.distributed) or self.config.rank == 0) self.should_log = self.should_write # and logging if self.should_log: if self.config.get("debug", False): pass else: tags = self.config.tags.split( ',') if self.config.tags != '' else None wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root, tags=tags, notes=self.config.notes, settings=wandb.Settings(start_method="fork")) self.model.train() self.step = 0 best_loss = np.inf validate_every = int(self.config.validate_every * self.iters_per_epoch) if self.config.prefetch: for i, batch in tqdm(enumerate(self.train_loader), desc=f"Prefetching...",
total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader):
10
2023-12-04 08:43:15+00:00
24k
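The trainer record above routes supervision through SILogLoss(beta=0.15) on both the sampled and dense paths, but the loss body itself falls outside the excerpt. As a reference, here is a minimal sketch of the scale-invariant log loss in the ZoeDepth style that this trainer credits; the clamping constant and the 10x scaling are assumptions, and the repository's actual SILogLoss may add interpolation and masking details not shown here.

import torch
import torch.nn as nn

class SILogLossSketch(nn.Module):
    # Minimal scale-invariant log loss sketch; not the repo's exact class.
    def __init__(self, beta: float = 0.15):
        super().__init__()
        self.name = "SILog"
        self.beta = beta

    def forward(self, pred: torch.Tensor, target: torch.Tensor, mask=None) -> torch.Tensor:
        if mask is not None:
            pred, target = pred[mask], target[mask]
        # g: per-pixel difference of log depths (depths assumed > 0)
        g = torch.log(pred.clamp(min=1e-6)) - torch.log(target.clamp(min=1e-6))
        # var(g) + beta * mean(g)^2 equals Eigen's mean(g^2) - (1 - beta) * mean(g)^2
        dg = torch.var(g) + self.beta * torch.mean(g) ** 2
        return 10.0 * torch.sqrt(dg)

With beta below 1 the squared-mean term is only partially penalized, which makes the loss partially invariant to a global scale error in the predicted depth.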
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
14,711
self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters():
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters():
broadcast(param, src=0)
9
2023-11-27 02:39:39+00:00
24k
EricGuo5513/momask-codes
gen_t2m.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n ...
import os import torch import torch.nn.functional as F import numpy as np from os.path import join as pjoin from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer from models.vq.model import RVQVAE, LengthEstimator from options.eval_option import EvalT2MOptions from utils.get_opt import get_opt from utils.fixseed import fixseed from visualization.joints2bvh import Joint2BVHConvertor from torch.distributions.categorical import Categorical from utils.motion_process import recover_from_ric from utils.plot_script import plot_3d_motion from utils.paramUtil import t2m_kinematic_chain
17,884
################################# ######Loading R-Transformer###### ################################# res_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.res_name, 'opt.txt') res_opt = get_opt(res_opt_path, device=opt.device) res_model = load_res_model(res_opt, vq_opt, opt) assert res_opt.vq_name == model_opt.vq_name ################################# ######Loading M-Transformer###### ################################# t2m_transformer = load_trans_model(model_opt, opt, 'latest.tar') ################################## #####Loading Length Predictor##### ################################## length_estimator = load_len_estimator(model_opt) t2m_transformer.eval() vq_model.eval() res_model.eval() length_estimator.eval() res_model.to(opt.device) t2m_transformer.to(opt.device) vq_model.to(opt.device) length_estimator.to(opt.device) ##### ---- Dataloader ---- ##### opt.nb_joints = 21 if opt.dataset_name == 'kit' else 22 mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'meta', 'std.npy')) def inv_transform(data): return data * std + mean prompt_list = [] length_list = [] est_length = False if opt.text_prompt != "": prompt_list.append(opt.text_prompt) if opt.motion_length == 0: est_length = True else: length_list.append(opt.motion_length) elif opt.text_path != "": with open(opt.text_path, 'r') as f: lines = f.readlines() for line in lines: infos = line.split('#') prompt_list.append(infos[0]) if len(infos) == 1 or (not infos[1].isdigit()): est_length = True length_list = [] else: length_list.append(int(infos[-1])) else: raise "A text prompt, or a file a text prompts are required!!!" # print('loading checkpoint {}'.format(file)) if est_length: print("Since no motion length are specified, we will use estimated motion lengthes!!") text_embedding = t2m_transformer.encode_text(prompt_list) pred_dis = length_estimator(text_embedding) probs = F.softmax(pred_dis, dim=-1) # (b, ntoken) token_lens = Categorical(probs).sample() # (b, seqlen) # lengths = torch.multinomial() else: token_lens = torch.LongTensor(length_list) // 4 token_lens = token_lens.to(opt.device).long() m_length = token_lens * 4 captions = prompt_list sample = 0 kinematic_chain = t2m_kinematic_chain converter = Joint2BVHConvertor() for r in range(opt.repeat_times): print("-->Repeat %d"%r) with torch.no_grad(): mids = t2m_transformer.generate(captions, token_lens, timesteps=opt.time_steps, cond_scale=opt.cond_scale, temperature=opt.temperature, topk_filter_thres=opt.topkr, gsample=opt.gumbel_sample) # print(mids) # print(mids.shape) mids = res_model.generate(mids, captions, token_lens, temperature=1, cond_scale=5) pred_motions = vq_model.forward_decoder(mids) pred_motions = pred_motions.detach().cpu().numpy() data = inv_transform(pred_motions) for k, (caption, joint_data) in enumerate(zip(captions, data)): print("---->Sample %d: %s %d"%(k, caption, m_length[k])) animation_path = pjoin(animation_dir, str(k)) joint_path = pjoin(joints_dir, str(k)) os.makedirs(animation_path, exist_ok=True) os.makedirs(joint_path, exist_ok=True) joint_data = joint_data[:m_length[k]] joint = recover_from_ric(torch.from_numpy(joint_data).float(), 22).numpy() bvh_path = pjoin(animation_path, "sample%d_repeat%d_len%d_ik.bvh"%(k, r, m_length[k])) _, ik_joint = converter.convert(joint, filename=bvh_path, iterations=100) bvh_path = pjoin(animation_path, "sample%d_repeat%d_len%d.bvh" % (k, r, m_length[k])) _, joint = 
converter.convert(joint, filename=bvh_path, iterations=100, foot_ik=False) save_path = pjoin(animation_path, "sample%d_repeat%d_len%d.mp4"%(k, r, m_length[k])) ik_save_path = pjoin(animation_path, "sample%d_repeat%d_len%d_ik.mp4"%(k, r, m_length[k]))
clip_version = 'ViT-B/32' def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, vq_opt.dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location='cpu') model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location='cpu') model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt, vq_opt, opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer def load_len_estimator(opt): model = LengthEstimator(512, 50) ckpt = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_estimator', 'model', 'finest.tar'), map_location=opt.device) model.load_state_dict(ckpt['estimator']) print(f'Loading Length Estimator from epoch {ckpt["epoch"]}!') return model if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') result_dir = pjoin('./generation', opt.ext) joints_dir = pjoin(result_dir, 'joints') animation_dir = pjoin(result_dir, 'animations') os.makedirs(joints_dir, exist_ok=True) os.makedirs(animation_dir,exist_ok=True) model_opt_path = pjoin(root_dir, 'opt.txt') model_opt 
= get_opt(model_opt_path, device=opt.device) ####################### ######Loading RVQ###### ####################### vq_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'opt.txt') vq_opt = get_opt(vq_opt_path, device=opt.device) vq_opt.dim_pose = dim_pose vq_model, vq_opt = load_vq_model(vq_opt) model_opt.num_tokens = vq_opt.nb_code model_opt.num_quantizers = vq_opt.num_quantizers model_opt.code_dim = vq_opt.code_dim ################################# ######Loading R-Transformer###### ################################# res_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.res_name, 'opt.txt') res_opt = get_opt(res_opt_path, device=opt.device) res_model = load_res_model(res_opt, vq_opt, opt) assert res_opt.vq_name == model_opt.vq_name ################################# ######Loading M-Transformer###### ################################# t2m_transformer = load_trans_model(model_opt, opt, 'latest.tar') ################################## #####Loading Length Predictor##### ################################## length_estimator = load_len_estimator(model_opt) t2m_transformer.eval() vq_model.eval() res_model.eval() length_estimator.eval() res_model.to(opt.device) t2m_transformer.to(opt.device) vq_model.to(opt.device) length_estimator.to(opt.device) ##### ---- Dataloader ---- ##### opt.nb_joints = 21 if opt.dataset_name == 'kit' else 22 mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'meta', 'std.npy')) def inv_transform(data): return data * std + mean prompt_list = [] length_list = [] est_length = False if opt.text_prompt != "": prompt_list.append(opt.text_prompt) if opt.motion_length == 0: est_length = True else: length_list.append(opt.motion_length) elif opt.text_path != "": with open(opt.text_path, 'r') as f: lines = f.readlines() for line in lines: infos = line.split('#') prompt_list.append(infos[0]) if len(infos) == 1 or (not infos[1].isdigit()): est_length = True length_list = [] else: length_list.append(int(infos[-1])) else: raise "A text prompt, or a file a text prompts are required!!!" 
# print('loading checkpoint {}'.format(file)) if est_length: print("Since no motion length are specified, we will use estimated motion lengthes!!") text_embedding = t2m_transformer.encode_text(prompt_list) pred_dis = length_estimator(text_embedding) probs = F.softmax(pred_dis, dim=-1) # (b, ntoken) token_lens = Categorical(probs).sample() # (b, seqlen) # lengths = torch.multinomial() else: token_lens = torch.LongTensor(length_list) // 4 token_lens = token_lens.to(opt.device).long() m_length = token_lens * 4 captions = prompt_list sample = 0 kinematic_chain = t2m_kinematic_chain converter = Joint2BVHConvertor() for r in range(opt.repeat_times): print("-->Repeat %d"%r) with torch.no_grad(): mids = t2m_transformer.generate(captions, token_lens, timesteps=opt.time_steps, cond_scale=opt.cond_scale, temperature=opt.temperature, topk_filter_thres=opt.topkr, gsample=opt.gumbel_sample) # print(mids) # print(mids.shape) mids = res_model.generate(mids, captions, token_lens, temperature=1, cond_scale=5) pred_motions = vq_model.forward_decoder(mids) pred_motions = pred_motions.detach().cpu().numpy() data = inv_transform(pred_motions) for k, (caption, joint_data) in enumerate(zip(captions, data)): print("---->Sample %d: %s %d"%(k, caption, m_length[k])) animation_path = pjoin(animation_dir, str(k)) joint_path = pjoin(joints_dir, str(k)) os.makedirs(animation_path, exist_ok=True) os.makedirs(joint_path, exist_ok=True) joint_data = joint_data[:m_length[k]] joint = recover_from_ric(torch.from_numpy(joint_data).float(), 22).numpy() bvh_path = pjoin(animation_path, "sample%d_repeat%d_len%d_ik.bvh"%(k, r, m_length[k])) _, ik_joint = converter.convert(joint, filename=bvh_path, iterations=100) bvh_path = pjoin(animation_path, "sample%d_repeat%d_len%d.bvh" % (k, r, m_length[k])) _, joint = converter.convert(joint, filename=bvh_path, iterations=100, foot_ik=False) save_path = pjoin(animation_path, "sample%d_repeat%d_len%d.mp4"%(k, r, m_length[k])) ik_save_path = pjoin(animation_path, "sample%d_repeat%d_len%d_ik.mp4"%(k, r, m_length[k]))
plot_3d_motion(ik_save_path, kinematic_chain, ik_joint, title=caption, fps=20)
9
2023-11-29 19:21:27+00:00
24k
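When no motion length is given, the script above samples one from the LengthEstimator head (a 512-to-50-bin classifier, per the load_len_estimator call) and multiplies by 4 to get a frame count. A hedged sketch of that sampling step, mirroring the record; the function name and the stand-in logits are illustrative:

import torch
import torch.nn.functional as F
from torch.distributions.categorical import Categorical

def sample_motion_lengths(logits: torch.Tensor, downsample: int = 4) -> torch.Tensor:
    # logits: (batch, nbins) scores over token-length bins
    probs = F.softmax(logits, dim=-1)          # (batch, nbins)
    token_lens = Categorical(probs).sample()   # one bin index per prompt
    return token_lens * downsample             # token length -> frame count

logits = torch.randn(2, 50)                    # stand-in for length_estimator output
print(sample_motion_lengths(logits))           # e.g. tensor([112,  36])

The factor of 4 is taken directly from the record (token_lens * 4 going out, length // 4 coming in), which suggests each motion token spans four frames after the VQ encoder's temporal downsampling.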
dvlab-research/LLMGA
llmga/serve/gradio_web_server.py
[ { "identifier": "default_conversation", "path": "llmga/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H...
import argparse import datetime import json import os import time import gradio as gr import requests import hashlib import torch import copy from llmga.llava.conversation import (default_conversation, conv_templates, SeparatorStyle) from llmga.llava.constants import LOGDIR from llmga.llava.utils import (build_logger, server_error_msg, violates_moderation, moderation_msg) from llmga.llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from llmga.llava.conversation import conv_templates, SeparatorStyle from llmga.llava.model.builder import load_pretrained_model from llmga.llava.utils import disable_torch_init from llmga.llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria from llmga.diffusers.pipeline_stable_diffusion_xl_lpw import StableDiffusionXLPipeline from diffusers import DiffusionPipeline
15,597
disable_btn = gr.Button.update(interactive=False) get_window_url_params = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log(url_params); return url_params; } """ def load_demo(url_params, request: gr.Request): dropdown_update = gr.Dropdown.update(visible=True) if "model" in url_params: model = url_params["model"] if model in models: dropdown_update = gr.Dropdown.update( value=model, visible=True) state = default_conversation.copy() return state, dropdown_update def load_demo_refresh_model_list(request: gr.Request): state = default_conversation.copy() dropdown_update = gr.Dropdown.update( choices=models, value=models[0] if len(models) > 0 else "" ) return state, dropdown_update def regenerate(state, image_process_mode, request: gr.Request): # logger.info(f"regenerate. ip: {request.client.host}") state.messages[-1][-1] = None prev_human_msg = state.messages[-2] if type(prev_human_msg[1]) in (tuple, list): prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode) state.skip_next = False return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 3 def clear_history(request: gr.Request): # logger.info(f"clear_history. ip: {request.client.host}") state = default_conversation.copy() return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 3 def add_text(state, text, image, image_process_mode, request: gr.Request): # logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}") if len(text) <= 0 and image is None: state.skip_next = True return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 3 if args.moderate: flagged = violates_moderation(text) if flagged: state.skip_next = True return (state, state.to_gradio_chatbot(), moderation_msg, None) + ( no_change_btn,) * 3 text = text[:1536] # Hard cut-off if image is not None: text = text[:1200] # Hard cut-off for images if '<image>' not in text: # text = '<Image><image></Image>' + text if model.config.mm_use_im_start_end: text = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + text else: text = DEFAULT_IMAGE_TOKEN + '\n' + text text = (text, image, image_process_mode) if len(state.get_images(return_pil=True)) > 0: state = default_conversation.copy() state.append_message(state.roles[0], text) state.append_message(state.roles[1], None) state.skip_next = False tp=state.to_gradio_chatbot() for tpp in tp: if tpp[-1] is None: continue tpp[-1] = tpp[-1].replace("\n\n","\n") if "<gen_image>" in tpp[-1] and "</gen_image>" in tpp[-1]: tpp[-1]="The generation is finished: \n\n" + tpp[-1] return (state, tp, "", None) + (disable_btn,) * 3 def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request): # logger.info(f"http_bot. 
ip: {request.client.host}") start_tstamp = time.time() model_name = model_selector if state.skip_next: # This generate call is skipped due to invalid inputs yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5 return if len(state.messages) == state.offset + 2: # First round of conversation template_name = "llava_llama_2" new_state = conv_templates[template_name].copy() new_state.append_message(new_state.roles[0], state.messages[-2][1]) new_state.append_message(new_state.roles[1], None) state = new_state prompt = state.get_prompt() images = state.get_images(return_pil=True) image_tensors =[image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda() for image in images] if len(image_tensors)==0: image_tensor=None elif len(image_tensors)==1: image_tensor=image_tensors[0] else: image_tensor=image_tensors
headers = {"User-Agent": "LLMGA Client"} no_change_btn = gr.Button.update() enable_btn = gr.Button.update(interactive=True) disable_btn = gr.Button.update(interactive=False) get_window_url_params = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log(url_params); return url_params; } """ def load_demo(url_params, request: gr.Request): dropdown_update = gr.Dropdown.update(visible=True) if "model" in url_params: model = url_params["model"] if model in models: dropdown_update = gr.Dropdown.update( value=model, visible=True) state = default_conversation.copy() return state, dropdown_update def load_demo_refresh_model_list(request: gr.Request): state = default_conversation.copy() dropdown_update = gr.Dropdown.update( choices=models, value=models[0] if len(models) > 0 else "" ) return state, dropdown_update def regenerate(state, image_process_mode, request: gr.Request): # logger.info(f"regenerate. ip: {request.client.host}") state.messages[-1][-1] = None prev_human_msg = state.messages[-2] if type(prev_human_msg[1]) in (tuple, list): prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode) state.skip_next = False return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 3 def clear_history(request: gr.Request): # logger.info(f"clear_history. ip: {request.client.host}") state = default_conversation.copy() return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 3 def add_text(state, text, image, image_process_mode, request: gr.Request): # logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}") if len(text) <= 0 and image is None: state.skip_next = True return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 3 if args.moderate: flagged = violates_moderation(text) if flagged: state.skip_next = True return (state, state.to_gradio_chatbot(), moderation_msg, None) + ( no_change_btn,) * 3 text = text[:1536] # Hard cut-off if image is not None: text = text[:1200] # Hard cut-off for images if '<image>' not in text: # text = '<Image><image></Image>' + text if model.config.mm_use_im_start_end: text = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + text else: text = DEFAULT_IMAGE_TOKEN + '\n' + text text = (text, image, image_process_mode) if len(state.get_images(return_pil=True)) > 0: state = default_conversation.copy() state.append_message(state.roles[0], text) state.append_message(state.roles[1], None) state.skip_next = False tp=state.to_gradio_chatbot() for tpp in tp: if tpp[-1] is None: continue tpp[-1] = tpp[-1].replace("\n\n","\n") if "<gen_image>" in tpp[-1] and "</gen_image>" in tpp[-1]: tpp[-1]="The generation is finished: \n\n" + tpp[-1] return (state, tp, "", None) + (disable_btn,) * 3 def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request): # logger.info(f"http_bot. 
ip: {request.client.host}") start_tstamp = time.time() model_name = model_selector if state.skip_next: # This generate call is skipped due to invalid inputs yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5 return if len(state.messages) == state.offset + 2: # First round of conversation template_name = "llava_llama_2" new_state = conv_templates[template_name].copy() new_state.append_message(new_state.roles[0], state.messages[-2][1]) new_state.append_message(new_state.roles[1], None) state = new_state prompt = state.get_prompt() images = state.get_images(return_pil=True) image_tensors =[image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda() for image in images] if len(image_tensors)==0: image_tensor=None elif len(image_tensors)==1: image_tensor=image_tensors[0] else: image_tensor=image_tensors
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
10
2023-11-27 18:46:55+00:00
24k
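In add_text above, a user prompt that arrives with an image but without an explicit <image> placeholder gets one prepended, optionally wrapped in image start/end tokens when the model was trained with mm_use_im_start_end. A small sketch of that branch; the literal token strings here are assumed placeholders for the values imported from llmga.llava.constants:

DEFAULT_IMAGE_TOKEN = "<image>"        # assumed values; the real ones
DEFAULT_IM_START_TOKEN = "<im_start>"  # come from llmga.llava.constants
DEFAULT_IM_END_TOKEN = "<im_end>"

def prepend_image_token(text: str, use_im_start_end: bool) -> str:
    # Only inject the placeholder when the prompt does not already have one.
    if DEFAULT_IMAGE_TOKEN in text:
        return text
    if use_im_start_end:
        prefix = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
    else:
        prefix = DEFAULT_IMAGE_TOKEN
    return prefix + "\n" + text

print(prepend_image_token("Describe this photo.", use_im_start_end=False))

The placeholder is later consumed by tokenizer_image_token, which splices IMAGE_TOKEN_INDEX into the input ids at each <image> position, as the record's next_line shows.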
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_na...
from matplotlib import pyplot as plt from pytorch3d.transforms import matrix_to_axis_angle from tqdm import tqdm from transforms3d.euler import euler2mat from omegaconf import OmegaConf from lib_data.get_data import prepare_real_seq from lib_data.data_provider import DatabasePoseProvider from lib_gart.templates import get_template from lib_gart.model import GaussianTemplateModel, AdditionalBones from lib_gart.optim_utils import * from lib_render.gauspl_renderer import render_cam_pcl from lib_gart.model_utils import transform_mu_frame from utils.misc import * from utils.viz import viz_render from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle from pytorch3d.ops import knn_points from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender from viz_utils import viz_spinning, viz_human_all, viz_dog_all from utils.ssim import ssim from datetime import datetime from test_utils import test from lib_guidance.mvdream.mvdream_guidance import MVDream from utils.lpips import LPIPS import imageio import torch import numpy as np import os, os.path as osp, shutil, sys import time import logging import argparse
20,917
self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED) template = get_template( mode=self.mode, template_model_path=self.template_model_path, init_beta=betas, cano_pose_type=getattr(self, "CANO_POSE_TYPE", "t_pose"), voxel_deformer_res=getattr(self, "VOXEL_DEFORMER_RES", 64), )
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! 
this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED) template = get_template( mode=self.mode, template_model_path=self.template_model_path, init_beta=betas, cano_pose_type=getattr(self, "CANO_POSE_TYPE", "t_pose"), voxel_deformer_res=getattr(self, "VOXEL_DEFORMER_RES", 64), )
add_bones = AdditionalBones(
4
2023-11-27 17:30:04+00:00
24k
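Editor's note on the record above: `TGFitter.__init__` loads an OmegaConf YAML profile, promotes every profile key to an instance attribute, lets explicit kwargs override the profile, and then reads optional flags through `getattr` with defaults. A minimal standalone sketch of that pattern follows; the class name and flag names are illustrative, not taken from the record:

```python
from omegaconf import OmegaConf


class ConfiguredFitter:
    """Illustrative restatement of the profile-loading pattern in TGFitter."""

    def __init__(self, profile_fn, **kwargs):
        cfg = OmegaConf.load(profile_fn)   # parse the YAML profile
        for k, v in cfg.items():           # every profile key becomes an attribute
            setattr(self, k, v)
        for k, v in kwargs.items():        # explicit kwargs win over the profile
            setattr(self, k, v)
        # optional flags: missing keys fall back to a default instead of
        # raising AttributeError, which is why getattr(self, ...) is used
        self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False)
        self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0)
```

The same `getattr(self, "NAME", default)` idiom is what lets `prepare_real_seq` in the record treat a profile key such as IMAGE_ZOOM_RATIO as an optional override of a function argument.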
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14436
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0]
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
8
2023-11-29 07:10:39+00:00
24k
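Editor's note on the record above: its `next_line`, `FovY = focal2fov(focal_length_x, height)`, relies on the standard pinhole relation between a focal length expressed in pixels and a field of view. The helpers below restate that relation from first principles; they mirror what `utils.graphics_utils.focal2fov`/`fov2focal` compute in Gaussian-splatting codebases, but the bodies here are a reconstruction, not copied from the record:

```python
import math


def focal2fov(focal: float, pixels: float) -> float:
    # pinhole camera: an image extent of `pixels` subtends an angle of
    # 2 * atan(pixels / (2 * focal)) radians at the optical center
    return 2 * math.atan(pixels / (2 * focal))


def fov2focal(fov: float, pixels: float) -> float:
    # inverse relation: fov2focal(focal2fov(f, p), p) == f
    return pixels / (2 * math.tan(fov / 2))


# A SIMPLE_PINHOLE camera has a single focal length, so the same value
# yields both FovY = focal2fov(f, height) and FovX = focal2fov(f, width).
```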
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n sel...
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
15000
# Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, 
device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0:
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0:
video = Video(FLAGS.out_dir)
14
2023-11-27 13:44:01+00:00
24k
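Editor's note on the record above: `optimize_mesh` defines a local `cycle` generator so the validation DataLoader can be drawn from indefinitely, one batch per training iteration. Extracted as a standalone sketch (the usage comment mirrors the record):

```python
def cycle(iterable):
    # yield from the iterable forever, restarting it on exhaustion;
    # unlike itertools.cycle this keeps no cache of past items, so a
    # shuffling DataLoader re-shuffles at the start of each pass
    iterator = iter(iterable)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(iterable)


# usage, as in the record:
#   v_it = cycle(dataloader_validate)
#   target = next(v_it)  # one validation batch per training iteration
```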
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19468
v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) 
_mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size)
2
2023-12-04 20:47:34+00:00
24k
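Editor's note on the record above: the pipeline's `__call__` comments state that `guidance_scale` plays the role of the guidance weight w from the Imagen paper, that `guidance_scale > 1.0` enables classifier-free guidance, and that unconditional and text embeddings are concatenated into one batch to avoid two forward passes. The combine step itself falls outside the excerpt; the sketch below is the standard classifier-free-guidance rule under that batching convention, and the helper name `cfg_combine` is mine, not the pipeline's:

```python
import torch


def cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # `noise_pred` holds the batched [unconditional, conditional] predictions
    # produced by one forward pass over torch.cat([uncond_emb, text_emb])
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # extrapolate from the unconditional prediction toward the conditional
    # one; guidance_scale == 1.0 reduces to the plain conditional prediction
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```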
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-i...
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14704
# connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() retries = kw.get("retries")
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. 
""" if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() retries = kw.get("retries")
if not isinstance(retries, Retry):
11
2023-11-27 07:01:39+00:00
24k
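For reference, a minimal sketch of the pool-keying behavior documented in the record above (the URL is hypothetical): connection_from_url parses the URL, and _default_key_normalizer lowercases scheme and host before building the PoolKey, so requests that differ only in case or path share one pool.

from urllib3 import PoolManager

http = PoolManager(num_pools=2)
pool_a = http.connection_from_url("HTTP://Example.com/search")
pool_b = http.connection_from_url("http://example.com/other/path")
# Same scheme/host/port after normalization -> same PoolKey -> same pool object.
assert pool_a is pool_b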
NobiDeveloper/Nobita-Filter-Bot
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, ...
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
15,236
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url='https://telegram.me/NobiDeveloperSupport') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🥷 ʜᴇʟᴘ 🥷', url='https://telegram.me/NobiDeveloperSupport'), InlineKeyboardButton('♻️ ᴜᴘᴅᴀᴛᴇꜱ ♻️', url='https://telegram.me/NobiDeveloper') ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>☤ ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title}\n\n🤖 ᴅᴏɴ’ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ 🤖\n\n🕵️ ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ᴅᴏᴜʙᴛ ʏᴏᴜ ᴄʟᴇᴀʀ ɪᴛ ᴜsɪɴɢ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴs</b>", reply_markup=reply_markup) else:
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url='https://telegram.me/NobiDeveloperSupport') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🥷 ʜᴇʟᴘ 🥷', url='https://telegram.me/NobiDeveloperSupport'), InlineKeyboardButton('♻️ ᴜᴘᴅᴀᴛᴇꜱ ♻️', url='https://telegram.me/NobiDeveloper') ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>☤ ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title}\n\n🤖 ᴅᴏɴ’ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ 🤖\n\n🕵️ ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ᴅᴏᴜʙᴛ ʏᴏᴜ ᴄʟᴇᴀʀ ɪᴛ ᴜsɪɴɢ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴs</b>", reply_markup=reply_markup) else:
settings = await get_settings(message.chat.id)
11
2023-11-28 13:36:56+00:00
24k
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\...
import torch import os import numpy as np import math import shutil import safetensors.torch from ldm.modules.diffusionmodules.util import timestep_embedding from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.diffusionmodules.openaimodel import UNetModel from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder from torch.utils.data import DataLoader from PIL import Image from torch.optim.lr_scheduler import LambdaLR from omegaconf import OmegaConf
20,379
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model
class ControlledUnetModel(UNetModel):
1
2023-11-30 13:50:58+00:00
24k
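A minimal usage sketch for the two helpers in the record above (the config and checkpoint paths are hypothetical): load_state_dict dispatches on the file extension, and get_state_dict unwraps a Lightning-style {'state_dict': ...} checkpoint.

model = create_model('./configs/bfrffusion.yaml')        # hypothetical config path
sd = load_state_dict('./checkpoints/model.safetensors',  # hypothetical checkpoint path
                     location='cpu')
# strict=False tolerates keys that differ between the checkpoint and the model
missing, unexpected = model.load_state_dict(sd, strict=False)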
IanYeung/MGLD-VSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import pytorch_lightning as pl import random import torch.nn.functional as F import copy import os import cv2 import matplotlib.pyplot as plt import numpy as np import numpy as np from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from basicsr.utils import DiffJPEG, USMSharp from basicsr.utils.img_process_util import filter2D from basicsr.data.transforms import paired_random_crop, triplet_random_crop from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt, random_add_speckle_noise_pt, random_add_saltpepper_noise_pt, bivariate_Gaussian from basicsr.archs.arch_util import flow_warp, resize_flow from scripts.util_flow import forward_backward_consistency_check, get_warped_and_mask from ldm.modules.diffusionmodules.util import make_ddim_timesteps from sklearn.decomposition import PCA from numpy import pi, exp, sqrt from numpy import pi, exp, sqrt
20,877
is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. """ if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def torch2img(input): input_ = input[0] input_ = input_.permute(1,2,0) input_ = input_.data.cpu().numpy() input_ = (input_ + 1.0) / 2 cv2.imwrite('./test.png', input_[:,:,::-1]*255.0) def cal_pca_components(input, n_components=3): pca = PCA(n_components=n_components) c, h, w = input.size() pca_data = input.permute(1,2,0) pca_data = pca_data.reshape(h*w, c) pca_data = pca.fit_transform(pca_data.data.cpu().numpy()) pca_data = pca_data.reshape((h, w, n_components)) return pca_data def visualize_fea(save_path, fea_img): fig = plt.figure(figsize = (fea_img.shape[1]/10, fea_img.shape[0]/10)) # Your image (W)idth and (H)eight in inches plt.subplots_adjust(left = 0, right = 1.0, top = 1.0, bottom = 0) im = plt.imshow(fea_img, vmin=0.0, vmax=1.0, cmap='jet', aspect='auto') # Show the image plt.savefig(save_path) plt.clf() def calc_mean_std(feat, eps=1e-5): """Calculate mean and std for adaptive_instance_normalization. Args: feat (Tensor): 4D tensor. eps (float): A small value added to the variance to avoid divide-by-zero. Default: 1e-5. """ size = feat.size() assert len(size) == 4, 'The input feature should be 4D tensor.' b, c = size[:2] feat_var = feat.view(b, c, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(b, c, 1, 1) feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) return feat_mean, feat_std def adaptive_instance_normalization(content_feat, style_feat): """Adaptive instance normalization. Adjust the reference features to have the similar color and illuminations as those in the degradate features. Args: content_feat (Tensor): The reference feature. style_feat (Tensor): The degradate features. """ size = content_feat.size() style_mean, style_std = calc_mean_std(style_feat) content_mean, content_std = calc_mean_std(content_feat) normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) return normalized_feat * style_std.expand(size) + style_mean.expand(size) def space_timesteps(num_timesteps, section_counts): """ Create a list of timesteps to use from an original diffusion process, given the number of timesteps we want to take from equally-sized portions of the original process. For example, if there's 300 timesteps and the section counts are [10,15,20] then the first 100 timesteps are strided to be 10 timesteps, the second 100 are strided to be 15 timesteps, and the final 100 are strided to be 20. If the stride is a string starting with "ddim", then the fixed striding from the DDIM paper is used, and only one section is allowed. :param num_timesteps: the number of diffusion steps in the original process to divide up. :param section_counts: either a list of numbers, or a string containing comma-separated numbers, indicating the step count per section. As a special case, use "ddimN" where N is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. 
""" if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
1
2023-11-30 01:50:29+00:00
24k
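The space_timesteps docstring above can be checked with a small worked example; the values follow directly from the function as written.

# 300 original steps, three equal 100-step sections strided to 10, 15 and 20 steps
steps = space_timesteps(300, [10, 15, 20])
assert len(steps) == 45

# DDIM-style striding: find an integer stride i such that range(0, 1000, i) has 250 steps
ddim_steps = space_timesteps(1000, "ddim250")
assert ddim_steps == set(range(0, 1000, 4))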
Czm369/MixPL
mmdet/datasets/transforms/transforms.py
[ { "identifier": "TRANSFORMS", "path": "mmdet/registry.py", "snippet": "TRANSFORMS = Registry(\n 'transform',\n parent=MMENGINE_TRANSFORMS,\n locations=['mmdet.datasets.transforms'])" }, { "identifier": "autocast_box_type", "path": "mmdet/structures/bbox/box_type.py", "snippet": ...
import copy import inspect import math import warnings import cv2 import mmcv import numpy as np import albumentations from typing import List, Optional, Sequence, Tuple, Union from mmcv.image import imresize from mmcv.image.geometric import _scale_size from mmcv.transforms import BaseTransform from mmcv.transforms import Pad as MMCV_Pad from mmcv.transforms import RandomFlip as MMCV_RandomFlip from mmcv.transforms import Resize as MMCV_Resize from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness from mmengine.dataset import BaseDataset from mmengine.utils import is_str from numpy import random from mmdet.registry import TRANSFORMS from mmdet.structures.bbox import HorizontalBoxes, autocast_box_type from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import log_img_scale from imagecorruptions import corrupt from albumentations import Compose
15,904
# Copyright (c) OpenMMLab. All rights reserved.
try:
    from imagecorruptions import corrupt
except ImportError:
    corrupt = None

try:
    import albumentations
    from albumentations import Compose
except ImportError:
    albumentations = None
    Compose = None

Number = Union[int, float]


def _fixed_scale_size(
    size: Tuple[int, int],
    scale: Union[float, int, tuple],
) -> Tuple[int, int]:
    """Rescale a size by a ratio.

    Args:
        size (tuple[int]): (w, h).
        scale (float | tuple(float)): Scaling factor.

    Returns:
        tuple[int]: scaled size.
    """
    if isinstance(scale, (float, int)):
        scale = (scale, scale)
    w, h = size
    # don't need 0.5 offset
    return int(w * float(scale[0])), int(h * float(scale[1]))


def rescale_size(old_size: tuple,
                 scale: Union[float, int, tuple],
                 return_scale: bool = False) -> tuple:
    """Calculate the new size to be rescaled to.

    Args:
        old_size (tuple[int]): The old size (w, h) of image.
        scale (float | tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by
            this factor, else if it is a tuple of 2 integers, then the
            image will be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides
            the rescaled image size.

    Returns:
        tuple[int]: The new rescaled image size.
    """
    w, h = old_size
    if isinstance(scale, (float, int)):
        if scale <= 0:
            raise ValueError(f'Invalid scale {scale}, must be positive.')
        scale_factor = scale
    elif isinstance(scale, tuple):
        max_long_edge = max(scale)
        max_short_edge = min(scale)
        scale_factor = min(max_long_edge / max(h, w),
                           max_short_edge / min(h, w))
    else:
        raise TypeError(
            f'Scale must be a number or tuple of int, but got {type(scale)}')
    # only change this
    new_size = _fixed_scale_size((w, h), scale_factor)

    if return_scale:
        return new_size, scale_factor
    else:
        return new_size


def imrescale(
    img: np.ndarray,
    scale: Union[float, Tuple[int, int]],
    return_scale: bool = False,
    interpolation: str = 'bilinear',
    backend: Optional[str] = None
) -> Union[np.ndarray, Tuple[np.ndarray, float]]:
    """Resize image while keeping the aspect ratio.

    Args:
        img (ndarray): The input image.
        scale (float | tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by
            this factor, else if it is a tuple of 2 integers, then the
            image will be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides
            the rescaled image.
        interpolation (str): Same as :func:`resize`.
        backend (str | None): Same as :func:`resize`.

    Returns:
        ndarray: The rescaled image.
    """
    h, w = img.shape[:2]
    new_size, scale_factor = rescale_size((w, h), scale, return_scale=True)
    rescaled_img = imresize(
        img, new_size, interpolation=interpolation, backend=backend)
    if return_scale:
        return rescaled_img, scale_factor
    else:
        return rescaled_img
@TRANSFORMS.register_module()
0
2023-11-30 08:58:00+00:00
24k
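A worked example of the keep-ratio logic above, using a typical detection-style scale (the numbers follow from the code as written):

# Pure ratio: both edges scale exactly
assert rescale_size((1280, 720), 0.5) == (640, 360)

# Max-size tuple: the limiting edge picks the factor, min(1333/1280, 800/720)
new_size, factor = rescale_size((1280, 720), (1333, 800), return_scale=True)
print(new_size, factor)  # roughly (1333, 749); int() truncates, no 0.5 rounding offset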
SEU-ProactiveSecurity-Group/MalPurifier
core/defense/amd_dla.py
[ { "identifier": "Max", "path": "core/attack/max.py", "snippet": "class Max(BaseAttack):\n \"\"\"\n Max攻击:迭代地从多个攻击方法中选择结果。\n\n 参数\n --------\n @param attack_list: List, 已实例化的攻击对象的列表。\n @param varepsilon: Float, 用于判断收敛性的标量。\n \"\"\"\n\n def __init__(self, attack_list, varepsilon=1e...
import time import os.path as path import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np from core.attack.max import Max from core.attack.stepwise_max import StepwiseMax from core.defense.md_dnn import MalwareDetectionDNN from core.defense.amd_template import DetectorTemplate from config import config, logging, ErrorHandler from tools import utils from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score
19,698
""" @inproceedings{sperl2020dla, title={DLA: dense-layer-analysis for adversarial example detection}, author={Sperl, Philip and Kao, Ching-Yu and Chen, Peng and Lei, Xiao and B{\"o}ttinger, Konstantin}, booktitle={2020 IEEE European Symposium on Security and Privacy (EuroS\&P)}, pages={198--215}, year={2020}, organization={IEEE} } This implementation is not an official version, but adapted from: https://github.com/v-wangg/OrthogonalPGD/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function logger = logging.getLogger('core.defense.amd_dla') logger.addHandler(ErrorHandler)
""" @inproceedings{sperl2020dla, title={DLA: dense-layer-analysis for adversarial example detection}, author={Sperl, Philip and Kao, Ching-Yu and Chen, Peng and Lei, Xiao and B{\"o}ttinger, Konstantin}, booktitle={2020 IEEE European Symposium on Security and Privacy (EuroS\&P)}, pages={198--215}, year={2020}, organization={IEEE} } This implementation is not an official version, but adapted from: https://github.com/v-wangg/OrthogonalPGD/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function logger = logging.getLogger('core.defense.amd_dla') logger.addHandler(ErrorHandler)
class AMalwareDetectionDLA(nn.Module, DetectorTemplate):
3
2023-11-27 02:00:23+00:00
24k
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_tess_stochastic.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Opti...
from TestCaseDistributionSystems.test_cases import case33 from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION from numpy import zeros, shape, ones, diag, concatenate, eye from scipy.sparse import csr_matrix as sparse from scipy.sparse import hstack, vstack, lil_matrix from numpy import flatnonzero as find from numpy import array, tile, arange, random from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A from pypower.idx_bus import PD, VMAX, VMIN, QD from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN from pypower.ext2int import ext2int from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp from copy import deepcopy from TestCaseDistributionSystems.data_format.idx_MG import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \ PMESS, EESS, NX_MG, QBIC, QUG, QG from TestCaseDistributionSystems.database_management import DataBaseManagement from StochasticOptimization.scenario_reduction import ScenarioReduction
15,826
beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [1, 1, 1, 1, 1]] Rc_temp[i * T + t] = mgs[i]["BIC"]["SMAX"] ** 2 Rc = concatenate([Rc, Rc_temp]) ## IV. 
Coupling constraints between the first stage and second stage decision variables # pg, pg_mg, pess_mg, pess_tess # Ts*x+Ws*ys<=hs ## IV) Formulate the coupling constraints between the first-stage and second-stage problems # 1) -Pg -Rg + pg <= 0 _nv_first_stage = self._nv_first_stage Ts = lil_matrix((ng * T, nv_first_stage)) Ws = lil_matrix((ng * T, nv_second_stage)) hs = zeros(ng * T) for i in range(T): for j in range(ng): Ts[i * ng + j, i * _nv_first_stage + ng * 3 + j] = -1 Ts[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = 1 # 2) Pg-Rg - pg <= 0 Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 3 + j] = 1 Ts_temp[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) Qg <= IgQg_max Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = -qg_u[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Qg >= IgQg_min Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = qg_l[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) -Pg_mg - Rg_mg + pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = -1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Pg_mg - Rg_mg - pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = 1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 5) pess_dc - pess_ch <= Pess_dc - Pess_ch + Ress Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 # Charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 3 + j] = -1 # Dis-charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 4 + j] = -1 # Reserve Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_CH] = -1
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: zhaoty@ntu.edu.sg @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Stochastic optimal power flow with tess" def main(self, power_networks, micro_grids, profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = nineq_index[i] + 
model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng, nmes=self.nmes) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formualtion(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary for DGs within distribution networks rg_l = zeros(ng) 
alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T * ng, nv_first_stage)) beq = zeros(T * ng) for i in 
range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[t * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[t * nmg + j, (t - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int)
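# Sketch (an added illustrative note): the merge below appends each MESS model block-diagonally; the
# nV_index / neq_index / nineq_index arrays record where each block starts in the merged matrices.
# Assuming scipy is available, the equality block alone could also be built in one call as
# scipy.sparse.block_diag([Aeq] + [m["Aeq"] for m in model_mess.values()]), but the explicit offset
# bookkeeping is kept here because the coupling rows added later must address columns inside each block.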
nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) 
else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = 2 * gen[:, PMAX] / baseMVA qg_u = 2 * gen[:, QMAX] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u]), T)]) vtypes = ["c"] * _nv_second_stage 
* T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg))]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb + j] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] -
nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage 
+ index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [1, 1, 1, 1, 1]] Rc_temp[i * T + t] = mgs[i]["BIC"]["SMAX"] ** 2 Rc = concatenate([Rc, Rc_temp]) ## IV. Coupling constraints between the first stage and second stage decision variables # pg, pg_mg, pess_mg, pess_tess # Ts*x+Ws*ys<=hs ## IV) Formulate the coupling constraints between the first-stage and second-stage problems # 1) -Pg -Rg + pg <= 0 _nv_first_stage = self._nv_first_stage Ts = lil_matrix((ng * T, nv_first_stage)) Ws = lil_matrix((ng * T, nv_second_stage)) hs = zeros(ng * T) for i in range(T): for j in range(ng): Ts[i * ng + j, i * _nv_first_stage + ng * 3 + j] = -1 Ts[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = 1 # 2) Pg-Rg - pg <= 0 Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 3 + j] = 1 Ts_temp[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) Qg <= IgQg_max Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = -qg_u[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Qg >= IgQg_min Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = qg_l[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) -Pg_mg - Rg_mg + pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = -1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Pg_mg - Rg_mg - pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = 1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 5) pess_dc - pess_ch <= Pess_dc - Pess_ch + Ress Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, 
nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 # Charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 3 + j] = -1 # Dis-charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 4 + j] = -1 # Reserve Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_CH] = -1
Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_DC] = 1
4
2023-11-27 15:57:53+00:00
24k
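The second-stage model in the record above keeps the branch-flow relaxation Pij**2 + Qij**2 <= vi * lij as rotated-cone rows (the Qc/Rc entries with coefficients [1, 1, -1/2, -1/2]). A minimal numpy sketch for checking how tight that relaxation is after a solve; every array name below is a placeholder for the solver output, not the record's actual variables:

import numpy as np

def soc_gap(pij, qij, lij, vi, f):
    """Worst violation of pij**2 + qij**2 <= vi[f] * lij over branches and periods."""
    return (pij ** 2 + qij ** 2 - vi[f, :] * lij).max()

nl, nb, T = 3, 4, 2
rng = np.random.default_rng(0)
vi = np.ones((nb, T))                   # squared voltage magnitudes per bus/period
pij = rng.uniform(0.0, 0.1, (nl, T))    # branch active power flows
qij = rng.uniform(0.0, 0.1, (nl, T))    # branch reactive power flows
f = np.array([0, 1, 2])                 # from-bus of each branch
lij = (pij ** 2 + qij ** 2) / vi[f, :]  # squared currents, exactly binding here
print(soc_gap(pij, qij, lij, vi, f))    # ~0.0 (up to tolerance): relaxation is exact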
girgle/DouZero_For_New_HLDDZ
main.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"Unity...
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop, Qt from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent
15,236
def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str = str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if 
(self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮 QtCore.Qt.WindowStaysOnTopHint | # 窗体总在最前端 QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # 固定窗体大小 self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # 参数 self.MyConfidence = 0.8 # 我的牌的置信度 self.OtherConfidence = 0.8 # 别人的牌的置信度 self.WhiteConfidence = 0.85 # 检测白块的置信度 self.LandlordFlagConfidence = 0.8 # 检测地主标志的置信度 self.ThreeLandlordCardsConfidence = 0.8 # 检测地主底牌的置信度 self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # 等待状态稳定延时 self.MyFilter = 40 # 我的牌检测结果过滤参数 self.OtherFilter = 25 # 别人的牌检测结果过滤参数 self.SleepTime = 0.1 # 循环中睡眠时间 self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # 叫地主阈值 self.BidThreshold2 = 72 # 抢地主阈值 self.JiabeiThreshold = ( (85, 72), # 叫地主 超级加倍 加倍 阈值 (85, 75) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下) ) self.MingpaiThreshold = 92 # 坐标 self.MyHandCardsPos = (180, 560, 1050, 90) # 我的截图区域 self.LPlayedCardsPos = (320, 280, 500, 120) # 左边出牌截图区域 self.RPlayedCardsPos = (600, 280, 500, 120) # 右边出牌截图区域 self.LandlordCardsPos = (600, 33, 220, 103) # 地主底牌截图区域 self.LPassPos = (360, 360, 120, 80) # 左边不出截图区域 self.RPassPos = (940, 360, 120, 80) # 右边不出截图区域 self.PassBtnPos = (200, 450, 1000, 120) # 要不起截图区域 self.GeneralBtnPos = (200, 450, 1000, 120) # 叫地主、抢地主、加倍按钮截图区域 self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # 地主标志截图区域(右-我-左) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self): 
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str 
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
self.env = GameEnv(ai_players)
3
2023-12-01 04:04:30+00:00
24k
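A standalone sketch of the hand bookkeeping used in init_cards above: the other two players' cards are the full 54-card deck minus the recognized hand, computed as a multiset difference (the record's comment notes that how those cards are split between the two opponents does not affect the AI). The encoding mirrors the record's AllEnvCard:

from collections import Counter

# Env encoding from the record: ranks 3..14, 17 for '2', 20/30 for the jokers.
AllEnvCard = [r for r in range(3, 15) for _ in range(4)] + [17] * 4 + [20, 30]

def other_hand_cards(my_hand_env):
    """Full deck minus my hand (multiset difference), as in init_cards."""
    return sorted((Counter(AllEnvCard) - Counter(my_hand_env)).elements())

my_hand = [3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 14, 17, 20, 30]  # 17 cards
print(len(other_hand_cards(my_hand)))  # 37 cards left for the other two seats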
super1207/satoricq
satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://ww...
import asyncio import aiohttp import json import uuid from kook_adapter import AdapterKook from mihoyo_adapter import AdapterMihoyo from onebot_adapter import AdapterOnebot from config import Config from aiohttp import web from qq_adapter import AdapterQQ from tool import remove_json_null
16,772
class Satori: def __init__(self) -> None: self._config:Config = Config() self.adapterlist = [] self.wsmap = {} self._evt_id = 100 async def _get_adapter(self,platform,self_id): ''' Look up the adapter serving the given platform and self_id ''' for adapter in self.adapterlist: info = adapter["info"] for bot in info: if self_id == bot["self_id"] and bot["platform"] == platform: return adapter["adapter"] return None async def ws_send_json(ws,js) -> None: js = remove_json_null(js) print("--------ws_send_json",json.dumps(js)) await ws.send_json(js) async def _handle_http_normal(self,request:web.Request): print("----http normal",request) '''Handle ordinary API calls here''' # Authentication check if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path platform = request.headers.get("X-Platform") self_id = request.headers.get("X-Self-ID")
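# Sketch (an added illustrative note, with hypothetical names): the bearer check above is the entire
# auth story for plain HTTP calls; in isolation it reduces to
#   authorized = (access_token == "") or (headers.get("Authorization") == "Bearer " + access_token)
# so an empty configured token disables authentication for these endpoints.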
class Satori: def __init__(self) -> None: self._config:Config = Config() self.adapterlist = [] self.wsmap = {} self._evt_id = 100 async def _get_adapter(self,platform,self_id): ''' Look up the adapter serving the given platform and self_id ''' for adapter in self.adapterlist: info = adapter["info"] for bot in info: if self_id == bot["self_id"] and bot["platform"] == platform: return adapter["adapter"] return None async def ws_send_json(ws,js) -> None: js = remove_json_null(js) print("--------ws_send_json",json.dumps(js)) await ws.send_json(js) async def _handle_http_normal(self,request:web.Request): print("----http normal",request) '''Handle ordinary API calls here''' # Authentication check if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path platform = request.headers.get("X-Platform") self_id = request.headers.get("X-Self-ID")
adapter:AdapterOnebot = await self._get_adapter(platform,self_id)
2
2023-12-03 13:53:47+00:00
24k
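The record's next_line resolves an adapter via Satori._get_adapter. A synchronous standalone sketch of that lookup, with placeholder strings standing in for the real AdapterOnebot/AdapterKook instances and made-up bot identities:

adapterlist = [
    {"adapter": "kook-adapter-object", "info": [{"platform": "kook", "self_id": "123"}]},
    {"adapter": "onebot-adapter-object", "info": [{"platform": "onebot", "self_id": "456"}]},
]

def get_adapter(platform, self_id):
    """Linear scan over every registered bot; returns None when nothing matches."""
    for adapter in adapterlist:
        for bot in adapter["info"]:
            if bot["self_id"] == self_id and bot["platform"] == platform:
                return adapter["adapter"]
    return None

print(get_adapter("onebot", "456"))  # onebot-adapter-object
print(get_adapter("qq", "789"))      # None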