Dataset schema (each record below carries these fields, in this order):

  Column              Type     Value / length range
  ------------------  -------  ------------------------------------------------
  repo_name           string   length 7 to 71
  file_path           string   length 5 to 118
  context             list     retrieved snippets ({identifier, path, snippet})
  import_statement    string   length 45 to 12.5k
  token_num           int64    641 to 99.4k
  cropped_code        string   length 44 to 17k
  all_code            string   length 43 to 754k
  next_line           string   length 2 to 330
  gold_snippet_index  int64    0 to 68
  created_at          string   length 25 (ISO timestamp)
  level               string   categorical, 9 classes
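If this dump comes from a Hugging Face dataset (the usual source of this layout), a record can be loaded and inspected as sketched below; the dataset path is a hypothetical placeholder, not the dump's actual source.

from datasets import load_dataset

# Hypothetical dataset path -- substitute the real one.
ds = load_dataset("org/repo-code-completion", split="train")
row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])
print(row["next_line"])           # the single line the model must predict
print(row["gold_snippet_index"])  # index of the gold snippet within row["context"]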
repo_name: gardenifi/server
file_path: app/raspi/mqtt.py
context: [ { "identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services:\n \"\"\"\n The `Services` class provides various methods for managing and controlling\n services related to a Raspberry Pi device, such as turning on/off valves,\n storing and deleting program cycles, lo...
import_statement:

import time
import os
import json
import threading
import sys
import paho.mqtt.client as mqtt
from threading import Thread
from loguru import logger
from app.raspi.services import Services
from app.raspi.const import (
    MQTT_CLIENT_ID,
    MQTT_TOPIC_STATUS,
    MQTT_TOPIC_METADATA,
    MQTT_TOPIC_CONFIG,
    MQTT_TOPIC_CMD,
    MQTT_TOPIC_VALVES,
    MQTT_STATUS_ERR,
    PROGRAM,
    PROGRAM_EXT,
    MQTT_STATUS_OK,
    MQTT_OK,
    MQTT_END,
    MQTT_USER,
    MQTT_PASS,
    MQTT_HOST,
    MQTT_PORT,
)
from app.raspi.helpers import Helpers
from app.raspi.const import Command

token_num: 11,320
cropped_code:
        Inputs: None

        Outputs: None
        """
        logger.debug(f"Destroying Mqtt Object Class: {cls.__instance}")
        cls.__instance = None
        cls._mqtt_thread = None
        cls._periodic_updates_thread = None

    def get_mqtt_thread(self):
        """Getter."""
        logger.debug(f"Getting current thread: {self._mqtt_thread}")
        return self._mqtt_thread

    def set_mqtt_thread(self, mqtt_thread):
        """Setter."""
        logger.debug(f"Setting new thread: {mqtt_thread}")
        self._mqtt_thread = mqtt_thread

    def get_periodic_updates_thread(self):
        """Getter."""
        return self._periodic_updates_thread

    def set_periodic_updates_thread(self, periodic_updates_thread):
        """Setter."""
        self._periodic_updates_thread = periodic_updates_thread

    def is_running(self):
        """Check whether mqtt thread state."""
        # logger.info(str(mqtt_thread))
        # logger.info(str(mqtt_thread is not None))
        # logger.info(str(mqtt_thread.is_alive()))
        return self._mqtt_thread is not None and self._mqtt_thread.is_alive()

    @staticmethod
    def on_disconnect(client, data, return_code=0):
        """OnDisconnect callback."""
        logger.debug(f"MQTT OnDisconnect: {client}:{data}:{return_code}")

    # The callback for when the client
    # receives a CONNACK response from the server.
    @staticmethod
    def on_connect(client, userdata, flags, return_code):
        """OnConnect callback."""
        logger.debug(f"MQTT OnConnect: {client}:{userdata}:{flags}:{return_code}")
        client.connected_flag = True
        # subscribe to the RASPIRRI TOPICS
        logger.debug(
            f"MQTT OnConnect: Subscribing to topics:\
            {MQTT_TOPIC_STATUS},\
            {MQTT_TOPIC_CONFIG},\
            {MQTT_TOPIC_CMD},\
            {MQTT_TOPIC_VALVES}"
        )
        client.subscribe(MQTT_TOPIC_STATUS)
        client.subscribe(MQTT_TOPIC_CONFIG)
        client.subscribe(MQTT_TOPIC_CMD)
        client.subscribe(MQTT_TOPIC_VALVES)
        if return_code == 0:
            logger.info("Connected successfully")
            Helpers().load_toggle_statuses_from_file()
            if Mqtt().get_periodic_updates_thread() is None:
                Mqtt().set_periodic_updates_thread(
                    Thread(daemon=True, name="PeriodicUpdatesThread", target=Mqtt.send_periodic_updates, args=(client,))
                )
                Mqtt().get_periodic_updates_thread().start()
        else:
            logger.info(f"Connect returned result code: {return_code}")

    @staticmethod
    def handle_valves(client, data):
        """Handle valves."""
        try:
            logger.info(f"valves data received={data}")
            Helpers().set_valves(data)
        except Exception as exception:
            logger.error(f"Error: {exception}")
            Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END)

    # Program Configuration handler
    # 1. It should parse the configuration as a JSON string
    # 2. If it is correct it should store it as a local file
    # 3. A scheduler should launch to turn on the irrigator for every cycle
    @staticmethod
    def handle_config(client, data):
        """Handle cfg."""
        try:
            json_data = json.loads(data)
            logger.info(f"prestored programs={json_data}")
            for program in json_data:
                logger.info(f"program={program}")
                if program == {}:
                    Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)
                    return
                Services().store_program_cycles(program, True)
            Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)
        except Exception as exception:
            logger.error(f"Error: {exception}")
            Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END)

    @staticmethod
    def handle_command(client, data):
        """Handle cmd."""
        try:
            json_data = json.loads(data)
            logger.info(json_data)
            cmd = json_data["cmd"]
            command = Command(cmd)
            try:
                valve = json_data["out"]
            except Exception as exception:
                logger.warning(
                    f"Could not find valve out parameter. \
                    Will use valve 1: {exception}"
                )
                valve = 1
all_code:
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class Mqtt: """MQTT Methods Class.""" __instance = None __lock = threading.Lock() client = None def __new__(cls): """ Create a new instance of the Mqtt class using the singleton design pattern. Returns: An instance of the Mqtt class. Example Usage: instance = Mqtt() """ if cls.__instance is None: with cls.__lock: cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code cls._mqtt_thread = None cls._periodic_updates_thread = None logger.debug(f"Returning Mqtt Object Class: {cls.__instance}") return cls.__instance @classmethod def destroy_instance(cls): """ Destroy the instance of the Mqtt class. This method sets the instance of the Mqtt class to None, effectively destroying the instance. Example Usage: ```python instance = Mqtt() # Create an instance of the Mqtt class Mqtt.destroy_instance() # Destroy the instance print(instance) # Output: None ``` Inputs: None Outputs: None """ logger.debug(f"Destroying Mqtt Object Class: {cls.__instance}") cls.__instance = None cls._mqtt_thread = None cls._periodic_updates_thread = None def get_mqtt_thread(self): """Getter.""" logger.debug(f"Getting current thread: {self._mqtt_thread}") return self._mqtt_thread def set_mqtt_thread(self, mqtt_thread): """Setter.""" logger.debug(f"Setting new thread: {mqtt_thread}") self._mqtt_thread = mqtt_thread def get_periodic_updates_thread(self): """Getter.""" return self._periodic_updates_thread def set_periodic_updates_thread(self, periodic_updates_thread): """Setter.""" self._periodic_updates_thread = periodic_updates_thread def is_running(self): """Check whether mqtt thread state.""" # logger.info(str(mqtt_thread)) # logger.info(str(mqtt_thread is not None)) # logger.info(str(mqtt_thread.is_alive())) return self._mqtt_thread is not None and self._mqtt_thread.is_alive() @staticmethod def on_disconnect(client, data, return_code=0): """OnDisconnect callback.""" logger.debug(f"MQTT OnDisconnect: {client}:{data}:{return_code}") # The callback for when the client # receives a CONNACK response from the server. 
@staticmethod def on_connect(client, userdata, flags, return_code): """OnConnect callback.""" logger.debug(f"MQTT OnConnect: {client}:{userdata}:{flags}:{return_code}") client.connected_flag = True # subscribe to the RASPIRRI TOPICS logger.debug( f"MQTT OnConnect: Subscribing to topics:\ {MQTT_TOPIC_STATUS},\ {MQTT_TOPIC_CONFIG},\ {MQTT_TOPIC_CMD},\ {MQTT_TOPIC_VALVES}" ) client.subscribe(MQTT_TOPIC_STATUS) client.subscribe(MQTT_TOPIC_CONFIG) client.subscribe(MQTT_TOPIC_CMD) client.subscribe(MQTT_TOPIC_VALVES) if return_code == 0: logger.info("Connected successfully") Helpers().load_toggle_statuses_from_file() if Mqtt().get_periodic_updates_thread() is None: Mqtt().set_periodic_updates_thread( Thread(daemon=True, name="PeriodicUpdatesThread", target=Mqtt.send_periodic_updates, args=(client,)) ) Mqtt().get_periodic_updates_thread().start() else: logger.info(f"Connect returned result code: {return_code}") @staticmethod def handle_valves(client, data): """Handle valves.""" try: logger.info(f"valves data received={data}") Helpers().set_valves(data) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) # Program Configuration handler # 1. It should parse the configuration as a JSON string # 2. If it is correct it should store it as a local file # 3. A scheduler should launch to turn on the irrigator for every cycle @staticmethod def handle_config(client, data): """Handle cfg.""" try: json_data = json.loads(data) logger.info(f"prestored programs={json_data}") for program in json_data: logger.info(f"program={program}") if program == {}: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) return Services().store_program_cycles(program, True) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def handle_command(client, data): """Handle cmd.""" try: json_data = json.loads(data) logger.info(json_data) cmd = json_data["cmd"] command = Command(cmd) try: valve = json_data["out"] except Exception as exception: logger.warning( f"Could not find valve out parameter. \ Will use valve 1: {exception}" ) valve = 1
next_line: file_path = PROGRAM + str(valve) + PROGRAM_EXT
gold_snippet_index: 9
created_at: 2023-12-22 08:06:09+00:00
level: 16k
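The fields above combine naturally into a next-line-completion check: the model sees the imports plus the cropped file and must emit next_line, while gold_snippet_index marks which context snippet contains the needed definition. A minimal evaluation sketch, assuming exact-match scoring; generate is a hypothetical stand-in for any completion model:

def next_line_exact_match(record, generate):
    # `generate` is a placeholder for a code model's completion call.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    completion = generate(prompt)
    predicted = completion.splitlines()[0].strip() if completion else ""
    return predicted == record["next_line"].strip()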
repo_name: shibing624/chatgpt-webui
file_path: src/models.py
context: [ { "identifier": "shared", "path": "src/shared.py", "snippet": "class State:\n def interrupt(self):\n def recover(self):\n def set_api_host(self, api_host: str):\n def reset_api_host(self):\n def reset_all(self):\n def set_api_key_queue(self, api_key_list):\n def switching_api_key(se...
import_statement:

import base64
import datetime
import json
import os
import colorama
import gradio as gr
import requests
import traceback
from io import BytesIO
from PIL import Image
from loguru import logger
from src import shared, config
from src.base_model import BaseLLMModel, ModelType
from src.chatglm import ChatGLMClient
from src.llama import LLaMAClient
from src.presets import (
    INITIAL_SYSTEM_PROMPT,
    TIMEOUT_ALL,
    TIMEOUT_STREAMING,
    STANDARD_ERROR_MSG,
    CONNECTION_TIMEOUT_MSG,
    READ_TIMEOUT_MSG,
    ERROR_RETRIEVE_MSG,
    GENERAL_ERROR_MSG,
    CHAT_COMPLETION_URL,
    SUMMARY_CHAT_SYSTEM_PROMPT,
)
from src.utils import (
    hide_middle_chars,
    count_token,
    construct_system,
    construct_user,
    get_last_day_of_month,
    i18n,
    replace_special_symbols,
)

token_num: 12,948
cropped_code:
self.headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", } def _get_billing_data(self, billing_url): with config.retrieve_proxy(): response = requests.get( billing_url, headers=self.headers, timeout=TIMEOUT_ALL, ) if response.status_code == 200: data = response.json() return data else: raise Exception( f"API request failed with status code {response.status_code}: {response.text}" ) def _decode_chat_response(self, response): error_msg = "" for chunk in response.iter_lines(): if chunk: chunk = chunk.decode() chunk_length = len(chunk) try: chunk = json.loads(chunk[6:]) except: print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") error_msg += chunk continue try: if chunk_length > 6 and "delta" in chunk["choices"][0]: if "finish_details" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_details"] elif "finish_reason" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_reason"] else: finish_reason = chunk["finish_details"] if finish_reason == "stop": break try: yield chunk["choices"][0]["delta"]["content"] except Exception as e: # logger.error(f"Error: {e}") continue except: print(f"ERROR: {chunk}") continue if error_msg and not error_msg == "data: [DONE]": raise Exception(error_msg) def set_key(self, new_access_key): ret = super().set_key(new_access_key) self._refresh_header() return ret def _single_query_at_once(self, history, temperature=1.0): timeout = TIMEOUT_ALL headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "temperature": f"{temperature}", } payload = { "model": self.model_name, "messages": history, } # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=False, timeout=timeout, ) return response def get_model( model_name, lora_model_path=None, access_key=None, temperature=None, top_p=None, system_prompt=None, user_name="", original_model=None, ): msg = i18n("模型设置为了:") + f" {model_name}" model_type = ModelType.get_type(model_name) lora_choices = ["No LoRA"] if model_type != ModelType.OpenAI: config.local_embedding = True model = original_model chatbot = gr.Chatbot.update(label=model_name) try: if model_type == ModelType.OpenAI: logger.info(f"正在加载OpenAI模型: {model_name}") model = OpenAIClient( model_name=model_name, api_key=access_key, system_prompt=system_prompt, user_name=user_name, ) logger.info(f"OpenAI模型加载完成: {model_name}") elif model_type == ModelType.OpenAIVision: logger.info(f"正在加载OpenAI Vision模型: {model_name}") access_key = os.environ.get("OPENAI_API_KEY", access_key) model = OpenAIVisionClient( model_name, api_key=access_key, user_name=user_name) elif model_type == ModelType.ChatGLM: logger.info(f"正在加载ChatGLM模型: {model_name}")
# -*- coding: utf-8 -*- """ Get model client from model name """ class OpenAIClient(BaseLLMModel): def __init__( self, model_name, api_key, system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, user_name="", ) -> None: super().__init__( model_name=model_name, temperature=temperature, top_p=top_p, system_prompt=system_prompt, user=user_name, ) self.api_key = api_key self.need_api_key = True self._refresh_header() def get_answer_stream_iter(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response(stream=True) if response is not None: iter = self._decode_chat_response(response) partial_text = "" for i in iter: partial_text += i yield partial_text else: yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG def get_answer_at_once(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response() response = json.loads(response.text) content = response["choices"][0]["message"]["content"] total_token_count = response["usage"]["total_tokens"] return content, total_token_count def count_token(self, user_input): input_token_count = count_token(construct_user(user_input)) if self.system_prompt is not None and len(self.all_token_counts) == 0: system_prompt_token_count = count_token( construct_system(self.system_prompt) ) return input_token_count + system_prompt_token_count return input_token_count def billing_info(self): try: curr_time = datetime.datetime.now() last_day_of_month = get_last_day_of_month( curr_time).strftime("%Y-%m-%d") first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" try: usage_data = self._get_billing_data(usage_url) except Exception as e: logger.warning(f"获取API使用情况失败:" + str(e)) return i18n("**获取API使用情况失败**") rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" except requests.exceptions.ConnectTimeout: status_text = ( STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG ) return status_text except requests.exceptions.ReadTimeout: status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG return status_text except Exception as e: traceback.print_exc() logger.error(i18n("获取API使用情况失败:") + str(e)) return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG def set_token_upper_limit(self, new_upper_limit): pass @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 def _get_response(self, stream=False): openai_api_key = self.api_key system_prompt = self.system_prompt history = self.history logger.debug(f"{history}") headers = { "Authorization": f"Bearer {openai_api_key}", "Content-Type": "application/json", } if system_prompt is not None: history = [construct_system(system_prompt), *history] payload = { "model": self.model_name, "messages": history, "temperature": self.temperature, "top_p": self.top_p, "n": self.n_choices, "stream": stream, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, } if self.max_generation_token is not None: payload["max_tokens"] = self.max_generation_token if self.stop_sequence is not None: payload["stop"] = self.stop_sequence if self.logit_bias is not None: payload["logit_bias"] = self.logit_bias if self.user_identifier is not None: payload["user"] = self.user_identifier if stream: timeout = TIMEOUT_STREAMING else: timeout = TIMEOUT_ALL # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: 
logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): try: response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=stream, timeout=timeout, ) except Exception as e: logger.error(f"Error: {e}") response = None return response def _refresh_header(self): self.headers = { "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json", } def _get_billing_data(self, billing_url): with config.retrieve_proxy(): response = requests.get( billing_url, headers=self.headers, timeout=TIMEOUT_ALL, ) if response.status_code == 200: data = response.json() return data else: raise Exception( f"API request failed with status code {response.status_code}: {response.text}" ) def _decode_chat_response(self, response): error_msg = "" for chunk in response.iter_lines(): if chunk: chunk = chunk.decode() chunk_length = len(chunk) try: chunk = json.loads(chunk[6:]) except Exception as e: print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") error_msg += chunk continue try: if chunk_length > 6 and "delta" in chunk["choices"][0]: if "finish_reason" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_reason"] else: finish_reason = chunk["finish_reason"] if finish_reason == "stop": break try: yield chunk["choices"][0]["delta"]["content"] except Exception as e: logger.error(f"Error: {e}") continue except: print(f"ERROR: {chunk}") continue if error_msg and not error_msg == "data: [DONE]": raise Exception(error_msg) def set_key(self, new_access_key): ret = super().set_key(new_access_key) self._refresh_header() return ret def _single_query_at_once(self, history, temperature=1.0): timeout = TIMEOUT_ALL headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "temperature": f"{temperature}", } payload = { "model": self.model_name, "messages": history, } # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=False, timeout=timeout, ) return response def auto_name_chat_history(self, name_chat_method, user_question, chatbot, single_turn_checkbox): if len(self.history) == 2 and not single_turn_checkbox and not config.hide_history_when_not_logged_in: user_question = self.history[0]["content"] if name_chat_method == i18n("模型自动总结(消耗tokens)"): ai_answer = self.history[1]["content"] try: history = [ {"role": "system", "content": SUMMARY_CHAT_SYSTEM_PROMPT}, {"role": "user", "content": f"Please write a title based on the following conversation:\n---\nUser: {user_question}\nAssistant: {ai_answer}"} ] response = self._single_query_at_once(history, temperature=0.0) response = json.loads(response.text) content = response["choices"][0]["message"]["content"] filename = replace_special_symbols(content) + ".json" except Exception as e: logger.info(f"自动命名失败。{e}") filename = replace_special_symbols(user_question)[:16] + ".json" return self.rename_chat_history(filename, chatbot) elif name_chat_method == i18n("第一条提问"): filename = replace_special_symbols(user_question)[:16] + ".json" return self.rename_chat_history(filename, chatbot) else: return gr.update() else: return gr.update() class OpenAIVisionClient(BaseLLMModel): def __init__( self, model_name, api_key, system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, user_name="" ) -> None: super().__init__( 
model_name=model_name, temperature=temperature, top_p=top_p, system_prompt=system_prompt, user=user_name ) self.api_key = api_key self.need_api_key = True self.max_generation_token = 4096 self.images = [] self._refresh_header() def get_answer_stream_iter(self): response = self._get_response(stream=True) if response is not None: iter = self._decode_chat_response(response) partial_text = "" for i in iter: partial_text += i yield partial_text else: yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG def get_answer_at_once(self): response = self._get_response() response = json.loads(response.text) content = response["choices"][0]["message"]["content"] total_token_count = response["usage"]["total_tokens"] return content, total_token_count def try_read_image(self, filepath): def is_image_file(filepath): # 判断文件是否为图片 valid_image_extensions = [ ".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"] file_extension = os.path.splitext(filepath)[1].lower() return file_extension in valid_image_extensions def image_to_base64(image_path): # 打开并加载图片 img = Image.open(image_path) # 获取图片的宽度和高度 width, height = img.size # 计算压缩比例,以确保最长边小于4096像素 max_dimension = 2048 scale_ratio = min(max_dimension / width, max_dimension / height) if scale_ratio < 1: # 按压缩比例调整图片大小 new_width = int(width * scale_ratio) new_height = int(height * scale_ratio) img = img.resize((new_width, new_height), Image.LANCZOS) # 将图片转换为jpg格式的二进制数据 buffer = BytesIO() if img.mode == "RGBA": img = img.convert("RGB") img.save(buffer, format='JPEG') binary_image = buffer.getvalue() # 对二进制数据进行Base64编码 base64_image = base64.b64encode(binary_image).decode('utf-8') return base64_image if is_image_file(filepath): logger.info(f"读取图片文件: {filepath}") base64_image = image_to_base64(filepath) self.images.append({ "path": filepath, "base64": base64_image, }) def handle_file_upload(self, files, chatbot, language): """if the model accepts multi modal input, implement this function""" if files: for file in files: if file.name: self.try_read_image(file.name) if self.images is not None: chatbot = chatbot + [([image["path"] for image in self.images], None)] return None, chatbot, None def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot, load_from_cache_if_possible=True): fake_inputs = real_inputs display_append = "" limited_context = False return limited_context, fake_inputs, display_append, real_inputs, chatbot def count_token(self, user_input): input_token_count = count_token(construct_user(user_input)) if self.system_prompt is not None and len(self.all_token_counts) == 0: system_prompt_token_count = count_token( construct_system(self.system_prompt) ) return input_token_count + system_prompt_token_count return input_token_count def billing_info(self): try: curr_time = datetime.datetime.now() last_day_of_month = get_last_day_of_month( curr_time).strftime("%Y-%m-%d") first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" try: usage_data = self._get_billing_data(usage_url) except Exception as e: logger.warning(f"获取API使用情况失败:" + str(e)) return i18n("**获取API使用情况失败**") rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" except requests.exceptions.ConnectTimeout: status_text = ( STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG ) return status_text except requests.exceptions.ReadTimeout: status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + 
ERROR_RETRIEVE_MSG return status_text except Exception as e: traceback.print_exc() logger.error(i18n("获取API使用情况失败:") + str(e)) return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 def _get_response(self, stream=False): openai_api_key = self.api_key system_prompt = self.system_prompt history = self.history if self.images: self.history[-1]["content"] = [ {"type": "text", "text": self.history[-1]["content"]}, *[{"type": "image_url", "image_url": "data:image/jpeg;base64," + image["base64"]} for image in self.images] ] self.images = [] logger.debug(colorama.Fore.YELLOW + f"{history}" + colorama.Fore.RESET) headers = { "Content-Type": "application/json", "Authorization": f"Bearer {openai_api_key}", } if system_prompt is not None: history = [construct_system(system_prompt), *history] payload = { "model": self.model_name, "messages": history, "temperature": self.temperature, "top_p": self.top_p, "n": self.n_choices, "stream": stream, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "max_tokens": 4096 } if self.stop_sequence is not None: payload["stop"] = self.stop_sequence if self.logit_bias is not None: payload["logit_bias"] = self.encoded_logit_bias() if self.user_identifier: payload["user"] = self.user_identifier if stream: timeout = TIMEOUT_STREAMING else: timeout = TIMEOUT_ALL # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): try: response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=stream, timeout=timeout, ) except: return None return response def _refresh_header(self): self.headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", } def _get_billing_data(self, billing_url): with config.retrieve_proxy(): response = requests.get( billing_url, headers=self.headers, timeout=TIMEOUT_ALL, ) if response.status_code == 200: data = response.json() return data else: raise Exception( f"API request failed with status code {response.status_code}: {response.text}" ) def _decode_chat_response(self, response): error_msg = "" for chunk in response.iter_lines(): if chunk: chunk = chunk.decode() chunk_length = len(chunk) try: chunk = json.loads(chunk[6:]) except: print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") error_msg += chunk continue try: if chunk_length > 6 and "delta" in chunk["choices"][0]: if "finish_details" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_details"] elif "finish_reason" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_reason"] else: finish_reason = chunk["finish_details"] if finish_reason == "stop": break try: yield chunk["choices"][0]["delta"]["content"] except Exception as e: # logger.error(f"Error: {e}") continue except: print(f"ERROR: {chunk}") continue if error_msg and not error_msg == "data: [DONE]": raise Exception(error_msg) def set_key(self, new_access_key): ret = super().set_key(new_access_key) self._refresh_header() return ret def _single_query_at_once(self, history, temperature=1.0): timeout = TIMEOUT_ALL headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "temperature": f"{temperature}", } payload = { "model": self.model_name, "messages": history, } # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: 
{shared.state.chat_completion_url}") with config.retrieve_proxy(): response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=False, timeout=timeout, ) return response def get_model( model_name, lora_model_path=None, access_key=None, temperature=None, top_p=None, system_prompt=None, user_name="", original_model=None, ): msg = i18n("模型设置为了:") + f" {model_name}" model_type = ModelType.get_type(model_name) lora_choices = ["No LoRA"] if model_type != ModelType.OpenAI: config.local_embedding = True model = original_model chatbot = gr.Chatbot.update(label=model_name) try: if model_type == ModelType.OpenAI: logger.info(f"正在加载OpenAI模型: {model_name}") model = OpenAIClient( model_name=model_name, api_key=access_key, system_prompt=system_prompt, user_name=user_name, ) logger.info(f"OpenAI模型加载完成: {model_name}") elif model_type == ModelType.OpenAIVision: logger.info(f"正在加载OpenAI Vision模型: {model_name}") access_key = os.environ.get("OPENAI_API_KEY", access_key) model = OpenAIVisionClient( model_name, api_key=access_key, user_name=user_name) elif model_type == ModelType.ChatGLM: logger.info(f"正在加载ChatGLM模型: {model_name}")
next_line: model = ChatGLMClient(model_name, user_name=user_name)
gold_snippet_index: 4
created_at: 2023-12-27 12:14:26+00:00
level: 16k
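The _decode_chat_response methods in this record parse OpenAI-style server-sent events: each line starts with the six-character "data: " prefix (hence chunk[6:]), the remainder is JSON, and streaming stops at finish_reason == "stop" or the [DONE] sentinel. A stripped-down sketch of the same parsing, assuming well-formed chunks (the record's code additionally tolerates malformed lines):

import json

def iter_sse_deltas(lines):
    # Each line looks like: b'data: {"choices": [{"delta": {"content": "hi"}, ...}]}'
    for raw in lines:
        if not raw:
            continue
        chunk = raw.decode()
        if chunk == "data: [DONE]":
            break
        payload = json.loads(chunk[6:])  # drop the 'data: ' prefix
        choice = payload["choices"][0]
        if choice.get("finish_reason") == "stop":
            break
        yield choice.get("delta", {}).get("content", "")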
repo_name: camenduru/AnyDoor-online-hf
file_path: ldm/models/diffusion/ddpm.py
context: [ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import_statement:

import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler

token_num: 11,835
cropped_code:
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        elif self.parameterization == "v":
            lvlb_weights = torch.ones_like(self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
        else:
            raise NotImplementedError("mu not supported")
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    @torch.no_grad()
    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        if self.make_it_fit:
            n_params = len([name for name, _ in
                            itertools.chain(self.named_parameters(),
                                            self.named_buffers())])
            for name, param in tqdm(
                    itertools.chain(self.named_parameters(),
                                    self.named_buffers()),
                    desc="Fitting old weights to new weights",
                    total=n_params
            ):
                if not name in sd:
                    continue
                old_shape = sd[name].shape
                new_shape = param.shape
                assert len(old_shape) == len(new_shape)
                if len(new_shape) > 2:
                    # we only modify first two axes
                    assert new_shape[2:] == old_shape[2:]
                # assumes first axis corresponds to output dim
                if not new_shape == old_shape:
                    new_param = param.clone()
                    old_param = sd[name]
                    if len(new_shape) == 1:
                        for i in range(new_param.shape[0]):
                            new_param[i] = old_param[i % old_shape[0]]
                    elif len(new_shape) >= 2:
                        for i in range(new_param.shape[0]):
                            for j in range(new_param.shape[1]):
                                new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]

                        n_used_old = torch.ones(old_shape[1])
                        for j in range(new_param.shape[1]):
                            n_used_old[j % old_shape[1]] += 1
                        n_used_new = torch.zeros(new_shape[1])
                        for j in range(new_param.shape[1]):
                            n_used_new[j] = n_used_old[j % old_shape[1]]

                        n_used_new = n_used_new[None, :]
                        while len(n_used_new.shape) < len(new_shape):
                            n_used_new = n_used_new.unsqueeze(-1)
                        new_param /= n_used_new

                    sd[name] = new_param

        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys:\n {missing}")
        if len(unexpected) > 0:
            print(f"\nUnexpected Keys:\n {unexpected}")

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
all_code:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
next_line: mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
gold_snippet_index: 14
created_at: 2023-12-25 04:48:34+00:00
level: 16k
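The buffers registered in this record give the closed-form forward process q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I), which is exactly what the record's next_line begins to compute. A compact sketch of that computation without the extract_into_tensor helper, assuming 4D image batches:

import torch

def q_mean_variance_sketch(x_start, t, sqrt_alphas_cumprod, alphas_cumprod):
    # t is a batch of timestep indices; per-timestep scalars broadcast over images.
    mean = sqrt_alphas_cumprod[t].view(-1, 1, 1, 1) * x_start
    variance = (1.0 - alphas_cumprod[t]).view(-1, 1, 1, 1).expand_as(x_start)
    return mean, variance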
repo_name: AContesini/Convert_PDF_to_DOCX_or_vice-versa
file_path: venv/Lib/site-packages/tqdm/auto.py
context: [ { "identifier": "TqdmExperimentalWarning", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class TqdmExperimentalWarning(TqdmWarning, FutureWarning):\n \"\"\"beta feature, unstable API and behaviour\"\"\"\n pass" }, { "identifier": "tqdm", "path": "venv/Lib/site-packages/tqdm...
import_statement:

import warnings
from .std import TqdmExperimentalWarning
from .autonotebook import tqdm as notebook_tqdm
from .asyncio import tqdm as asyncio_tqdm
from .std import tqdm as std_tqdm

token_num: 12,682
cropped_code:
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=TqdmExperimentalWarning) if notebook_tqdm != std_tqdm:
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=TqdmExperimentalWarning) if notebook_tqdm != std_tqdm:
next_line: class tqdm(notebook_tqdm, asyncio_tqdm):  # pylint: disable=inconsistent-mro
gold_snippet_index: 0
created_at: 2023-12-24 15:46:18+00:00
level: 16k
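For reference, the class assembled in this record is what end users import; standard usage looks like:

from tqdm.auto import tqdm  # resolves to the notebook or console variant at import time

for _ in tqdm(range(1000), desc="processing"):
    pass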
repo_name: pkariz/grin-explorer
file_path: backend/api/views.py
context: [ { "identifier": "fetch_and_store_block", "path": "backend/api/bootstrap.py", "snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise Node...
import_statement:

from asgiref.sync import async_to_sync
from django.contrib.contenttypes.models import ContentType
from django.db.models.deletion import ProtectedError
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from dramatiq_abort import abort
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.exceptions import NotFound
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from slugify import slugify
from .bootstrap import fetch_and_store_block, update_blockchain_progress
from .exceptions import UpdateBlockchainProgressError
from .helpers import get_filter_backends, load_data_from_redis
from .filters import (
    BlockFilter,
    CustomBlockSearchFilter,
    NodeFilter,
    NodeGroupFilter,
)
from .mixins import CustomModelViewSet
from .models import Blockchain, Block, Reorg, Node, NodeGroup, DramatiqTask
from .serializers import (
    BlockchainSerializer,
    BlockchainExtendedSerializer,
    BlockSerializer,
    BlockDetailSerializer,
    NodeSerializer,
    NodeGroupSerializer,
    DramatiqTaskSerializer,
)
from .tasks import bootstrap_blockchain, delete_blockchain
import channels
import logging
import pytz

token_num: 11,114
cropped_code:
# happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and fixes # 
inputs/outputs web_socket_msg_type = 'reorged' web_socket_msg = BlockSerializer(new_block).data if web_socket_msg_type == 'reorged': web_socket_msg = blockchain.slug # TODO: check if channels-redis 4.x is fixed: https://github.com/django/channels_redis/issues/332 channel_layer = channels.layers.get_channel_layer() async_to_sync(channel_layer.group_send)( 'default_group', { 'type': web_socket_msg_type, 'message': web_socket_msg, } ) # update the loading progress since it could be skewed due to the # periodic task updating it before this view has been called try: update_blockchain_progress(blockchain) except UpdateBlockchainProgressError: # ignore it, let it update itself the next time pass return Response(status=status.HTTP_200_OK) def get_permissions(self): """ Add, delete and update require authentication, others don't. """ # accepted view can currently be called by anyone, we ignore its data though # and fetch it from our node. Maybe in the future node could send some # header to prevent potential spam permission_classes = [] if self.action not in ['list', 'retrieve', 'accepted', 'graphs']: permission_classes = [IsAuthenticated] return [permission() for permission in permission_classes] class BlockViewSet(CustomModelViewSet): """API endpoint for Block. This ViewSet is nested in BlockchainViewSet.""" queryset = Block.objects\ .order_by('-height')\ .all() filter_backends = get_filter_backends({
logger = logging.getLogger(__name__)


# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))


class NodeGroupViewSet(CustomModelViewSet):
    """API endpoint for NodeGroup."""
    queryset = NodeGroup.objects.all()
    filterset_class = NodeGroupFilter
    serializer_class = NodeGroupSerializer
    lookup_field = 'slug'
    permission_classes = [IsAuthenticated]

    def create(self, request, *args, **kwargs):
        slug = request.data.get('slug')
        if not slug:
            request.data['slug'] = slugify(request.data['name'], to_lower=True)
        return super().create(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
        try:
            return super().destroy(request, *args, **kwargs)
        except ProtectedError as e:
            raise DRFValidationError(
                detail='Node group is related to nodes, delete them first')


class NodeViewSet(CustomModelViewSet):
    """API endpoint for Node."""
    queryset = Node.objects.all()
    filterset_class = NodeFilter
    serializer_class = NodeSerializer
    # currently all node views require authentication
    permission_classes = [IsAuthenticated]
    lookup_field = 'slug'

    def create(self, request, *args, **kwargs):
        slug = request.data.get('slug')
        if not slug:
            request.data['slug'] = slugify(request.data['name'], to_lower=True)
        request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
        return super().create(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        # NOTE: super().partial_update calls update(..., partial=True)
        if not kwargs.get('partial'):
            # we don't allow full updates - aka PUT
            raise DRFPermissionDenied()
        return super().update(request, *args, **kwargs)

    def partial_update(self, request, slug=None):
        request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
        return super().partial_update(request, slug=slug)

    @action(detail=True, methods=['get'])
    def reachable(self, request, slug=None):
        node = self.get_object()
        try:
            res = node.is_reachable()
        except Exception as e:
            logger.exception('Unreachable node')
            res = False
        return Response(res, status=status.HTTP_200_OK)

    def destroy(self, request, *args, **kwargs):
        try:
            return super().destroy(request, *args, **kwargs)
        except ProtectedError as e:
            raise DRFValidationError(
                detail='Node is related to blockchains, delete them first')


class BlockchainViewSet(CustomModelViewSet):
    """API endpoint for Blockchain."""
    queryset = Blockchain.objects.all()
    serializer_class = BlockchainSerializer
    lookup_field = 'slug'

    def get_serializer_class(self):
        # when authenticated we return also NodeSerializer data
        if self.request.user.is_authenticated:
            return BlockchainExtendedSerializer
        return BlockchainSerializer

    def create(self, request, *args, **kwargs):
        slug = request.data.get('slug')
        if not slug:
            request.data['slug'] = slugify(request.data['name'], to_lower=True)
        request.data['node'] = request.data['node']
        return super().create(request, *args, **kwargs)

    def destroy(self, request, slug=None):
        instance = self.get_object()
        message = delete_blockchain.send(instance.slug)
        task = DramatiqTask.objects.create(
            type=DramatiqTask.Type.BLOCKCHAIN_DELETE,
            status=DramatiqTask.Status.IN_PROGRESS,
            message_id=message.message_id,
            content_object=instance,
        )
        return Response(
            DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK)

    def _abort_previous_tasks(self, blockchain):
        conflicting_message_ids = DramatiqTask.objects.filter(
            status=DramatiqTask.Status.IN_PROGRESS,
            object_id=blockchain.id,
            content_type=ContentType.objects.get_for_model(blockchain)
        ).values_list('message_id', flat=True)
        # abort previous conflicting tasks if they exist
        for conflicting_message_id in conflicting_message_ids:
            abort(conflicting_message_id)

    @action(detail=True, methods=['post'])
    def bootstrap(self, request, slug=None):
        blockchain = self.get_object()
        if not blockchain.node.is_reachable:
            raise APIException(detail='Node is unreachable')
        self._abort_previous_tasks(blockchain)
        # create a new task
        message = bootstrap_blockchain.send(blockchain.slug)
        task = DramatiqTask.objects.create(
            type=DramatiqTask.Type.BOOTSTRAP,
            status=DramatiqTask.Status.IN_PROGRESS,
            message_id=message.message_id,
            content_object=blockchain,
        )
        return Response(
            DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK)

    @action(
        detail=True,
        methods=['post'],
        url_path='bootstrap/abort',
        url_name='bootstrap-abort',
    )
    def abort_bootstrap(self, request, slug=None):
        blockchain = self.get_object()
        self._abort_previous_tasks(blockchain)
        return Response(status=status.HTTP_200_OK)

    @action(detail=True, methods=['get'])
    def graphs(self, request, slug=None):
        """Returns data for all graphs."""
        data = {
            'transaction_graph': load_data_from_redis(f'tx_graph__{slug}'),
        }
        return Response(data=data, status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'])
    def accepted(self, request, slug=None):
        # NOTE: if the node is offline and then you start it again, it will
        # call this view for each block it gets. In this case there will be
        # many fast sequential calls to this view; too many postgres
        # connections might be opened, so view executions might actually fail.
        # The suggested solution is to comment out 'block_accepted_url' in the
        # node's config file, run the node, wait for it to sync, uncomment
        # 'block_accepted_url' and then manually bootstrap it.
        blockchain = self.get_object()
        # check if a new block has been received while this blockchain is in
        # the process of being deleted.
        deleting = DramatiqTask.objects.filter(
            type=DramatiqTask.Type.BLOCKCHAIN_DELETE,
            object_id=blockchain.id,
            content_type=ContentType.objects.get_for_model(blockchain)
        ).exists()
        if deleting:
            # nothing to do, ignore the new block
            return Response(status=status.HTTP_404_NOT_FOUND)
        # get request data
        height = request.data['data']['header']['height']
        hash = request.data['hash']
        # prev_hash comes as a list of int bytes, so we convert it to hex
        # NOTE: the same is true for some other data which we currently don't
        # need, so we don't transform it, eg. data.header.kernel_root
        prev_hash = None
        if request.data['data']['header']['prev_hash']:
            prev_hash = bytes(request.data['data']['header']['prev_hash']).hex()
        logger.info(
            'Block accepted',
            extra={
                'height': height,
                'hash': hash,
                'prev_hash': prev_hash,
                'blockchain': blockchain.slug,
            },
        )
        web_socket_msg_type = 'send_block'
        # handle reorg case
        # we expect blocks to come ordered by height; there are some edge cases
        # here which are not handled, but they're unlikely to happen (eg. a
        # reorg happens but the websocket call for the first blocks fails while
        # for later ones it doesn't, and then the code below wouldn't spot a
        # reorg)
        block_at_this_height = blockchain.blocks\
            .filter(height=height, reorg__isnull=True)\
            .first()
        # we fetch here because anyone can call this view - we don't want to
        # work with fake data
        new_block = fetch_and_store_block(blockchain, height, prefetch=False)
        if block_at_this_height:
            if block_at_this_height.hash == new_block.hash:
                # probably have fetched this block while bootstrapping; the
                # accepted view got called a bit later so we already have it,
                # noop
                return Response(status=status.HTTP_200_OK)
            logger.info(
                'Block accepted - reorg spotted',
                extra={
                    'block_at_this_height': block_at_this_height,
                    'block_at_this_height.hash': block_at_this_height.hash,
                    'block_at_this_height.reorg': block_at_this_height.reorg,
                    'hash': new_block.hash
                },
            )
            # reorg spotted
            reorged_blocks = list(blockchain.blocks\
                .filter(height__gte=height, reorg__isnull=True)
                .exclude(pk=new_block.pk)
                .order_by('height'))
            logger.info('reorged_blocks at start: {}'.format(reorged_blocks))
            # these reorged blocks are guaranteed to be reorged; now find any
            # previous blocks which were also reorged - aka get the common
            # ancestor of the reorged block at 'height' and the new (main)
            # block. We start with the current height to avoid more logic for
            # the Reorg instance params
            if new_block.hash == block_at_this_height.hash:
                # at height X we got H1, then we got H2 (this call), but now it
                # reorged back to H1, so we don't do anything; no reorg is
                # stored since we didn't fetch the block in time from the node
                logger.info('Reorg cancelled out, noop')
                return Response(status=status.HTTP_200_OK)
            logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash})
            prev_block_new_chain = new_block
            prev_block_old_chain = reorged_blocks[0]
            logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(
                prev_block_new_chain, prev_block_old_chain))
            # remove the first one since it will get added again
            reorged_blocks = reorged_blocks[1:]
            logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks))
            main_blocks = []
            while True:
                # theoretically we might be missing the block in the db, but we
                # don't cover such cases currently
                if not prev_block_new_chain:
                    logger.info('reached break in IF NOT prev_block_new_chain')
                    # this means that prev_block_old_chain is also None, since
                    # they're both "previous" of their genesis block
                    break
                if prev_block_new_chain == prev_block_old_chain:
                    logger.info('reached break in IF prev_block_new_chain == prev_block_old_chain')
                    # found the common ancestor
                    break
                # add to the left because we want to keep it sorted by height
                reorged_blocks.insert(0, prev_block_old_chain)
                main_blocks.insert(0, prev_block_new_chain)
                logger.info('new reorged_blocks: {}'.format(reorged_blocks))
                logger.info('new main_blocks: {}'.format(main_blocks))
                prev_block_new_chain = prev_block_new_chain.get_previous_block()
                prev_block_old_chain = prev_block_old_chain.get_previous_block()
                logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(
                    prev_block_new_chain, prev_block_old_chain))
            logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(
                reorged_blocks, main_blocks))
            reorg = Reorg.objects.create(
                blockchain=blockchain,
                start_reorg_block=reorged_blocks[0],
                end_reorg_block=reorged_blocks[-1],
                start_main_block=main_blocks[0],
            )
            # the Reorg post_save signal fixes .reorg on new/old blocks and
            # fixes inputs/outputs
            web_socket_msg_type = 'reorged'
        web_socket_msg = BlockSerializer(new_block).data
        if web_socket_msg_type == 'reorged':
            web_socket_msg = blockchain.slug
        # TODO: check if channels-redis 4.x is fixed: https://github.com/django/channels_redis/issues/332
        channel_layer = channels.layers.get_channel_layer()
        async_to_sync(channel_layer.group_send)(
            'default_group',
            {
                'type': web_socket_msg_type,
                'message': web_socket_msg,
            }
        )
        # update the loading progress since it could be skewed due to the
        # periodic task updating it before this view has been called
        try:
            update_blockchain_progress(blockchain)
        except UpdateBlockchainProgressError:
            # ignore it, let it update itself the next time
            pass
        return Response(status=status.HTTP_200_OK)

    def get_permissions(self):
        """Add, delete and update require authentication, others don't."""
        # the accepted view can currently be called by anyone; we ignore its
        # data though and fetch it from our node. Maybe in the future the node
        # could send some header to prevent potential spam
        permission_classes = []
        if self.action not in ['list', 'retrieve', 'accepted', 'graphs']:
            permission_classes = [IsAuthenticated]
        return [permission() for permission in permission_classes]


class BlockViewSet(CustomModelViewSet):
    """API endpoint for Block. This ViewSet is nested in BlockchainViewSet."""
    queryset = Block.objects\
        .order_by('-height')\
        .all()
    filter_backends = get_filter_backends({
'SearchFilter': CustomBlockSearchFilter,
6
2023-12-24 22:15:11+00:00
16k
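The reorg branch of the `accepted` view in this record boils down to walking the new chain and the old chain backwards from the same height until they meet. A minimal sketch of that walk, detached from the ORM — the block objects are stand-ins for the models above, with only the `get_previous_block()` method the view itself relies on:

```py
# Sketch of the common-ancestor walk that delimits a reorg. Assumes both tips
# sit at the same height and get_previous_block() returns None at genesis.
def collect_reorg_span(new_tip, old_tip):
    """Return (reorged_blocks, main_blocks), each sorted by ascending height."""
    reorged_blocks, main_blocks = [old_tip], [new_tip]
    prev_new = new_tip.get_previous_block()
    prev_old = old_tip.get_previous_block()
    while prev_new is not None and prev_new != prev_old:
        # prepend so the lists stay sorted by height
        reorged_blocks.insert(0, prev_old)
        main_blocks.insert(0, prev_new)
        prev_new = prev_new.get_previous_block()
        prev_old = prev_old.get_previous_block()
    return reorged_blocks, main_blocks
```

Keeping both lists sorted by height is what lets the view build its `Reorg` row from simple first/last index lookups (`reorged_blocks[0]`, `reorged_blocks[-1]`, `main_blocks[0]`).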
lchen1019/Image_Cropper
ISAT/widgets/mainwindow.py
[ { "identifier": "Ui_MainWindow", "path": "ISAT/ui/MainWindow.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1280, 764)\n MainWindow.setMinimumSize(QtCore.QSize(800, 600))\n font ...
from PyQt5 import QtWidgets, QtCore, QtGui from ISAT.ui.MainWindow import Ui_MainWindow from ISAT.widgets.files_dock_widget import FilesDockWidget from ISAT.widgets.canvas import AnnotationScene, AnnotationView from ISAT.configs import STATUSMode, MAPMode, load_config, save_config, CONFIG_FILE, DEFAULT_CONFIG_FILE, CHECKPOINT_PATH, ISAT_ROOT from ISAT.annotation import Object, Annotation from ISAT.widgets.polygon import Polygon, PromptPoint from ISAT.widgets.converter_dialog import ConverterDialog from PIL import Image from PyQt5.QtCore import QThread, pyqtSignal import os import json import functools import imgviz import ISAT.icons_rc import numpy as np import cv2 # adjust image saturation
12,655
# -*- coding: utf-8 -*-
# @Author  : LG

class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.image_root: str = None
        self.label_root: str = None

        self.files_list: list = []
        self.current_index = None
        self.current_file_index: int = None

        self.current_group = 1

        self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE
        self.saved = True

        self.can_be_annotated = True
        self.load_finished = False
        self.png_palette = None  # if the image has a palette, it is a single-channel annotation PNG
        self.instance_cmap = imgviz.label_colormap()
        # annotation target
        self.current_label: Annotation = None
        # added: manual/automatic group selection
        self.group_select_mode = 'auto'
        # all labels
        self.rects = []
        self.is_show_bitmap = False

        self.init_ui()
        self.init_connect()
        self.reset_action()

    def init_ui(self):
        #q

# -*- coding: utf-8 -*-
# @Author  : LG

class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.image_root: str = None
        self.label_root: str = None

        self.files_list: list = []
        self.current_index = None
        self.current_file_index: int = None

        self.current_group = 1

        self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE
        self.saved = True

        self.can_be_annotated = True
        self.load_finished = False
        self.png_palette = None  # if the image has a palette, it is a single-channel annotation PNG
        self.instance_cmap = imgviz.label_colormap()
        # annotation target
        self.current_label: Annotation = None
        # added: manual/automatic group selection
        self.group_select_mode = 'auto'
        # all labels
        self.rects = []
        self.is_show_bitmap = False

        self.init_ui()
        self.init_connect()
        self.reset_action()

    def init_ui(self):
        #q
self.files_dock_widget = FilesDockWidget(mainwindow=self)
1
2023-12-24 16:19:16+00:00
16k
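The `png_palette` attribute in this record hinges on one PIL detail: a paletted ("P" mode) PNG is the usual on-disk form of a single-channel label mask, and its palette doubles as the label colormap. A small illustrative check, not code taken from ISAT itself:

```py
from PIL import Image

def load_label_png(path):
    """Return the image plus its palette if it is a paletted annotation PNG."""
    img = Image.open(path)
    palette = img.getpalette()  # None when the file has no palette
    if img.mode == 'P' and palette is not None:
        # pixel values are label ids; the palette maps ids to display colors
        return img, palette
    return img, None
```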
facebookresearch/ca_body
ca_body/models/mesh_vae_drivable.py
[ { "identifier": "ConvBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n size,\n lrelu_slope=0.2,\n kernel_size=3,\n padding=1,\n wnorm_dim=0,\n ):\n ...
import logging import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F import ca_body.nn.layers as la from typing import Dict, Optional, Tuple from torchvision.utils import make_grid from torchvision.transforms.functional import gaussian_blur from ca_body.nn.blocks import ( ConvBlock, ConvDownBlock, UpConvBlockDeep, tile2d, weights_initializer, ) from ca_body.nn.dof_cal import LearnableBlur from ca_body.utils.geom import ( GeometryModule, compute_view_cos, depth_discontuity_mask, depth2normals, ) from ca_body.nn.shadow import ShadowUNet, PoseToShadow from ca_body.nn.unet import UNetWB from ca_body.nn.color_cal import CalV5 from ca_body.utils.image import linear2displayBatch from ca_body.utils.lbs import LBSModule from ca_body.utils.render import RenderLayer from ca_body.utils.seams import SeamSampler from ca_body.utils.render import RenderLayer from ca_body.nn.face import FaceDecoderFrontal
11,387
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) class CameraPixelBias(nn.Module): def __init__(self, image_height, image_width, cameras, ds_rate) -> None: super().__init__() self.image_height = image_height self.image_width = image_width self.cameras = cameras self.n_cameras = len(cameras) bias = th.zeros( (self.n_cameras, 1, image_width // ds_rate, image_height // ds_rate), dtype=th.float32 ) self.register_parameter("bias", nn.Parameter(bias)) def forward(self, idxs: th.Tensor): bias_up = F.interpolate( self.bias[idxs], size=(self.image_height, self.image_width), mode='bilinear' ) return bias_up class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt')
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) class CameraPixelBias(nn.Module): def __init__(self, image_height, image_width, cameras, ds_rate) -> None: super().__init__() self.image_height = image_height self.image_width = image_width self.cameras = cameras self.n_cameras = len(cameras) bias = th.zeros( (self.n_cameras, 1, image_width // ds_rate, image_height // ds_rate), dtype=th.float32 ) self.register_parameter("bias", nn.Parameter(bias)) def forward(self, idxs: th.Tensor): bias_up = F.interpolate( self.bias[idxs], size=(self.image_height, self.image_width), mode='bilinear' ) return bias_up class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt')
self.decoder_face = FaceDecoderFrontal(assets=assets, **decoder_face)
19
2023-12-27 15:31:35+00:00
16k
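`CameraPixelBias` in this record is a compact example of a learnable per-camera correction map: a low-resolution parameter tensor indexed by camera id and bilinearly upsampled at forward time. A self-contained sketch of the same pattern — the class name and the consistent height/width ordering are mine, not ca_body's:

```py
import torch as th
import torch.nn as nn
import torch.nn.functional as F

class PerCameraBias(nn.Module):
    """One learnable low-resolution map per camera, upsampled on demand."""

    def __init__(self, n_cameras: int, height: int, width: int, ds_rate: int = 8):
        super().__init__()
        self.height, self.width = height, width
        # registered as a parameter so it is trained and checkpointed
        self.bias = nn.Parameter(
            th.zeros(n_cameras, 1, height // ds_rate, width // ds_rate)
        )

    def forward(self, cam_idxs: th.Tensor) -> th.Tensor:
        # pick the maps for the cameras in the batch, then upsample
        return F.interpolate(
            self.bias[cam_idxs],
            size=(self.height, self.width),
            mode='bilinear',
            align_corners=False,
        )
```

Storing the bias downsampled keeps the parameter count at 1/ds_rate² of a full-resolution map, while the bilinear upsampling keeps the learned correction smooth.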
open-mmlab/Amphion
models/tts/valle/valle_inference.py
[ { "identifier": "G2PModule", "path": "text/g2p_module.py", "snippet": "class G2PModule:\n \"\"\"Phonemize Text.\"\"\"\n\n def __init__(\n self,\n language=\"en-us\",\n backend=\"espeak\",\n separator=Separator(word=\"_\", syllable=\"-\", phone=\"|\"),\n preserve_...
import os import numpy as np import torch import torchaudio import argparse from text.g2p_module import G2PModule from utils.tokenizer import AudioTokenizer, tokenize_audio from models.tts.valle.valle import VALLE from models.tts.base.tts_inferece import TTSInference from models.tts.valle.valle_dataset import VALLETestDataset, VALLETestCollator from processors.phone_extractor import phoneExtractor from text.text_token_collation import phoneIDCollation
13,115
# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

class VALLEInference(TTSInference):
    def __init__(self, args=None, cfg=None):
        TTSInference.__init__(self, args, cfg)

        self.g2p_module = G2PModule(backend=self.cfg.preprocess.phone_extractor)
        text_token_path = os.path.join(
            cfg.preprocess.processed_dir, cfg.dataset[0], cfg.preprocess.symbols_dict
        )
        self.audio_tokenizer = AudioTokenizer()

    def _build_model(self):
        model = VALLE(self.cfg.model)
        return model

    def _build_test_dataset(self):
        return VALLETestDataset, VALLETestCollator

    def inference_one_clip(self, text, text_prompt, audio_file, save_name="pred"):
        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence

# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

class VALLEInference(TTSInference):
    def __init__(self, args=None, cfg=None):
        TTSInference.__init__(self, args, cfg)

        self.g2p_module = G2PModule(backend=self.cfg.preprocess.phone_extractor)
        text_token_path = os.path.join(
            cfg.preprocess.processed_dir, cfg.dataset[0], cfg.preprocess.symbols_dict
        )
        self.audio_tokenizer = AudioTokenizer()

    def _build_model(self):
        model = VALLE(self.cfg.model)
        return model

    def _build_test_dataset(self):
        return VALLETestDataset, VALLETestCollator

    def inference_one_clip(self, text, text_prompt, audio_file, save_name="pred"):
        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence
phone_extractor = phoneExtractor(self.cfg)
7
2023-11-15 09:19:27+00:00
16k
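`inference_one_clip` in this record wires three stages: phonemize the prompt and target text, tokenize the prompt audio into discrete codes, and let VALL-E continue from the prompt. A deliberately abstract sketch of that flow — `g2p`, `tokenize` and `synthesize` are placeholders for illustration, not Amphion APIs:

```py
def infer_clip(text, text_prompt, prompt_wav, g2p, tokenize, synthesize):
    # the prompt phones precede the target phones so the model carries over
    # the prompt speaker's voice and style into the continuation
    phones = g2p(text_prompt) + g2p(text)
    # discrete acoustic tokens of the prompt audio condition the decoder
    prompt_tokens = tokenize(prompt_wav)
    return synthesize(phones, prompt_tokens)
```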
BobaZooba/xllm
tests/conftest.py
[ { "identifier": "enums", "path": "src/xllm/enums.py", "snippet": "class General:\nclass Transformers:\nclass Registry:\nclass Datasets:\nclass Collators:\nclass Trainers:\nclass Experiments:\nclass EnvironmentVariables:\nclass LogLevel:" }, { "identifier": "LMCollator", "path": "src/xllm/col...
import json import os import pytest from _pytest.tmpdir import TempPathFactory from peft import LoraConfig, PeftModel, get_peft_model from transformers import ( AutoTokenizer, FalconConfig, FalconForCausalLM, LlamaConfig, LlamaForCausalLM, PreTrainedTokenizer, TrainingArguments, ) from src.xllm import enums from src.xllm.collators.lm import LMCollator from src.xllm.core.config import Config from src.xllm.datasets.soda import SodaDataset from tests.helpers.constants import ( FALCON_TOKENIZER_DIR, LLAMA_TOKENIZER_DIR, LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES, ) from tests.helpers.dummy_data import DATA, SODA_DATASET
13,723
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @pytest.fixture(scope="session") def llama_tokenizer() -> PreTrainedTokenizer: tokenizer = AutoTokenizer.from_pretrained(LLAMA_TOKENIZER_DIR) tokenizer.pad_token = tokenizer.eos_token return tokenizer @pytest.fixture(scope="session") def llama_model_config(llama_tokenizer: PreTrainedTokenizer) -> LlamaConfig: config = LlamaConfig( vocab_size=len(llama_tokenizer), hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, max_position_embeddings=32, ) return config @pytest.fixture(scope="session") def llama_model(llama_model_config: LlamaConfig) -> LlamaForCausalLM: model = LlamaForCausalLM(config=llama_model_config) return model @pytest.fixture(scope="session") def llama_lora_config() -> LoraConfig: lora_config = LoraConfig( r=2, target_modules=LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES, bias="none", task_type="CAUSAL_LM", lora_alpha=8, lora_dropout=0.1, ) return lora_config @pytest.fixture(scope="session") def llama_lora_model(llama_model: LlamaForCausalLM, llama_lora_config: LoraConfig) -> PeftModel: llama_model = get_peft_model(model=llama_model, peft_config=llama_lora_config) return llama_model @pytest.fixture(scope="session") def falcon_tokenizer() -> PreTrainedTokenizer: tokenizer = AutoTokenizer.from_pretrained(FALCON_TOKENIZER_DIR) tokenizer.pad_token = tokenizer.eos_token return tokenizer @pytest.fixture(scope="session") def falcon_model_config(falcon_tokenizer: PreTrainedTokenizer) -> FalconConfig: config = FalconConfig( vocab_size=len(falcon_tokenizer), hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, max_position_embeddings=32, ) return config @pytest.fixture(scope="session") def falcon_model(falcon_model_config: FalconConfig) -> FalconForCausalLM: model = FalconForCausalLM(config=falcon_model_config) return model @pytest.fixture(scope="session") def soda_dataset() -> SodaDataset: dataset = SodaDataset(data=SODA_DATASET) return dataset @pytest.fixture(scope="session") def llama_lm_collator(llama_tokenizer: PreTrainedTokenizer) -> LMCollator: collator = LMCollator(tokenizer=llama_tokenizer, max_length=32) return collator @pytest.fixture(scope="session") def path_to_outputs(tmp_path_factory: TempPathFactory) -> str: path = tmp_path_factory.mktemp("tmp") / "outputs/" return os.path.abspath(path) @pytest.fixture(scope="session") def training_arguments(path_to_outputs: str) -> TrainingArguments: arguments = TrainingArguments( output_dir=path_to_outputs, per_device_train_batch_size=2, gradient_accumulation_steps=2, warmup_steps=50, learning_rate=2e-4, max_steps=500, num_train_epochs=1, weight_decay=0.001, max_grad_norm=1.0, label_smoothing_factor=0.1, logging_steps=10, save_strategy="steps", save_steps=100, save_total_limit=1, hub_strategy="checkpoint", push_to_hub=False, save_safetensors=True, remove_unused_columns=False,
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @pytest.fixture(scope="session") def llama_tokenizer() -> PreTrainedTokenizer: tokenizer = AutoTokenizer.from_pretrained(LLAMA_TOKENIZER_DIR) tokenizer.pad_token = tokenizer.eos_token return tokenizer @pytest.fixture(scope="session") def llama_model_config(llama_tokenizer: PreTrainedTokenizer) -> LlamaConfig: config = LlamaConfig( vocab_size=len(llama_tokenizer), hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, max_position_embeddings=32, ) return config @pytest.fixture(scope="session") def llama_model(llama_model_config: LlamaConfig) -> LlamaForCausalLM: model = LlamaForCausalLM(config=llama_model_config) return model @pytest.fixture(scope="session") def llama_lora_config() -> LoraConfig: lora_config = LoraConfig( r=2, target_modules=LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES, bias="none", task_type="CAUSAL_LM", lora_alpha=8, lora_dropout=0.1, ) return lora_config @pytest.fixture(scope="session") def llama_lora_model(llama_model: LlamaForCausalLM, llama_lora_config: LoraConfig) -> PeftModel: llama_model = get_peft_model(model=llama_model, peft_config=llama_lora_config) return llama_model @pytest.fixture(scope="session") def falcon_tokenizer() -> PreTrainedTokenizer: tokenizer = AutoTokenizer.from_pretrained(FALCON_TOKENIZER_DIR) tokenizer.pad_token = tokenizer.eos_token return tokenizer @pytest.fixture(scope="session") def falcon_model_config(falcon_tokenizer: PreTrainedTokenizer) -> FalconConfig: config = FalconConfig( vocab_size=len(falcon_tokenizer), hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, max_position_embeddings=32, ) return config @pytest.fixture(scope="session") def falcon_model(falcon_model_config: FalconConfig) -> FalconForCausalLM: model = FalconForCausalLM(config=falcon_model_config) return model @pytest.fixture(scope="session") def soda_dataset() -> SodaDataset: dataset = SodaDataset(data=SODA_DATASET) return dataset @pytest.fixture(scope="session") def llama_lm_collator(llama_tokenizer: PreTrainedTokenizer) -> LMCollator: collator = LMCollator(tokenizer=llama_tokenizer, max_length=32) return collator @pytest.fixture(scope="session") def path_to_outputs(tmp_path_factory: TempPathFactory) -> str: path = tmp_path_factory.mktemp("tmp") / "outputs/" return os.path.abspath(path) @pytest.fixture(scope="session") def training_arguments(path_to_outputs: str) -> TrainingArguments: arguments = TrainingArguments( output_dir=path_to_outputs, per_device_train_batch_size=2, gradient_accumulation_steps=2, warmup_steps=50, learning_rate=2e-4, max_steps=500, num_train_epochs=1, weight_decay=0.001, max_grad_norm=1.0, label_smoothing_factor=0.1, logging_steps=10, save_strategy="steps", save_steps=100, save_total_limit=1, hub_strategy="checkpoint", push_to_hub=False, save_safetensors=True, remove_unused_columns=False,
log_level=enums.LogLevel.info,
0
2023-11-10 17:55:03+00:00
16k
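Session-scoped fixtures like the ones in this record are built once per test run and injected into tests by argument name. A trivial illustrative consumer (not a test from the repository):

```py
def test_pad_token_matches_eos(llama_tokenizer):
    # the fixture pins pad_token to eos_token, so batch padding always works
    assert llama_tokenizer.pad_token == llama_tokenizer.eos_token
```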
AMAAI-Lab/mustango
diffusers/src/diffusers/pipelines/pipeline_flax_utils.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes...
import importlib import inspect import os import flax import numpy as np import PIL from typing import Any, Dict, List, Optional, Union from flax.core.frozen_dict import FrozenDict from huggingface_hub import snapshot_download from PIL import Image from tqdm.auto import tqdm from ..configuration_utils import ConfigMixin from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin from ..utils import CONFIG_NAME, DIFFUSERS_CACHE, BaseOutput, http_user_agent, is_transformers_available, logging from transformers import FlaxPreTrainedModel from diffusers import pipelines from diffusers import pipelines
11,427
Instantiate a Flax diffusion pipeline from pre-trained pipeline weights. The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like `CompVis/ldm-text2im-large-256`. - A path to a *directory* containing pipeline weights saved using [`~FlaxDiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`. dtype (`str` or `jnp.dtype`, *optional*): Override the default `jnp.dtype` and load the model under this dtype. If `"auto"` is passed the dtype will be automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. specify the folder name here. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components - of the specific pipeline class. The overwritten components are then directly passed to the pipelines `__init__` method. See example below for more information. 
<Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"runwayml/stable-diffusion-v1-5"` </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```py >>> from diffusers import FlaxDiffusionPipeline >>> # Download pipeline from huggingface.co and cache. >>> # Requires to be logged in to Hugging Face hub, >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", ... revision="bf16", ... dtype=jnp.bfloat16, ... ) >>> # Download pipeline, but use a different scheduler >>> from diffusers import FlaxDPMSolverMultistepScheduler >>> model_id = "runwayml/stable-diffusion-v1-5" >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... ) >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( ... model_id, revision="bf16", dtype=jnp.bfloat16, scheduler=dpmpp ... ) >>> dpm_params["scheduler"] = dpmpp_state ``` """ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) from_pt = kwargs.pop("from_pt", False) dtype = kwargs.pop("dtype", None) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained if not os.path.isdir(pretrained_model_name_or_path): config_dict = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, ) # make sure we only download sub-folders and `diffusers` filenames folder_names = [k for k in config_dict.keys() if not k.startswith("_")] allow_patterns = [os.path.join(k, "*") for k in folder_names]
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. """ images: Union[List[PIL.Image.Image], np.ndarray] class FlaxDiffusionPipeline(ConfigMixin): r""" Base class for all models. [`FlaxDiffusionPipeline`] takes care of storing all components (models, schedulers, processors) for diffusion pipelines and handles methods for loading, downloading and saving models as well as a few methods common to all pipelines to: - enabling/disabling the progress bar for the denoising iteration Class attributes: - **config_name** ([`str`]) -- name of the config file that will store the class and module names of all components of the diffusion pipeline. """ config_name = "model_index.json" def register_modules(self, **kwargs): # import it here to avoid circular import for name, module in kwargs.items(): if module is None: register_dict = {name: (None, None)} else: # retrieve library library = module.__module__.split(".")[0] # check if the module is a pipeline module pipeline_dir = module.__module__.split(".")[-2] path = module.__module__.split(".") is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) # if library is not in LOADABLE_CLASSES, then it is a custom module. 
# Or if it's a pipeline module, then the module is inside the pipeline # folder so we set the library to module name. if library not in LOADABLE_CLASSES or is_pipeline_module: library = pipeline_dir # retrieve class_name class_name = module.__class__.__name__ register_dict = {name: (library, class_name)} # save model index config self.register_to_config(**register_dict) # set models setattr(self, name, module) def save_pretrained(self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict]): # TODO: handle inference_state """ Save all variables of the pipeline that can be saved and loaded as well as the pipelines configuration file to a directory. A pipeline variable can be saved and loaded if its class implements both a save and loading method. The pipeline can easily be re-loaded using the `[`~FlaxDiffusionPipeline.from_pretrained`]` class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. """ self.save_config(save_directory) model_index_dict = dict(self.config) model_index_dict.pop("_class_name") model_index_dict.pop("_diffusers_version") model_index_dict.pop("_module", None) for pipeline_component_name in model_index_dict.keys(): sub_model = getattr(self, pipeline_component_name) if sub_model is None: # edge case for saving a pipeline with safety_checker=None continue model_cls = sub_model.__class__ save_method_name = None # search for the model's base class in LOADABLE_CLASSES for library_name, library_classes in LOADABLE_CLASSES.items(): library = importlib.import_module(library_name) for base_class, save_load_methods in library_classes.items(): class_candidate = getattr(library, base_class, None) if class_candidate is not None and issubclass(model_cls, class_candidate): # if we found a suitable base class in LOADABLE_CLASSES then grab its save method save_method_name = save_load_methods[0] break if save_method_name is not None: break save_method = getattr(sub_model, save_method_name) expects_params = "params" in set(inspect.signature(save_method).parameters.keys()) if expects_params: save_method( os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name] ) else: save_method(os.path.join(save_directory, pipeline_component_name)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a Flax diffusion pipeline from pre-trained pipeline weights. The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like `CompVis/ldm-text2im-large-256`. - A path to a *directory* containing pipeline weights saved using [`~FlaxDiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`. dtype (`str` or `jnp.dtype`, *optional*): Override the default `jnp.dtype` and load the model under this dtype. 
If `"auto"` is passed the dtype will be automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. specify the folder name here. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components - of the specific pipeline class. The overwritten components are then directly passed to the pipelines `__init__` method. See example below for more information. <Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"runwayml/stable-diffusion-v1-5"` </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```py >>> from diffusers import FlaxDiffusionPipeline >>> # Download pipeline from huggingface.co and cache. >>> # Requires to be logged in to Hugging Face hub, >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", ... revision="bf16", ... dtype=jnp.bfloat16, ... ) >>> # Download pipeline, but use a different scheduler >>> from diffusers import FlaxDPMSolverMultistepScheduler >>> model_id = "runwayml/stable-diffusion-v1-5" >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... ) >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( ... model_id, revision="bf16", dtype=jnp.bfloat16, scheduler=dpmpp ... 
) >>> dpm_params["scheduler"] = dpmpp_state ``` """ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) from_pt = kwargs.pop("from_pt", False) dtype = kwargs.pop("dtype", None) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained if not os.path.isdir(pretrained_model_name_or_path): config_dict = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, ) # make sure we only download sub-folders and `diffusers` filenames folder_names = [k for k in config_dict.keys() if not k.startswith("_")] allow_patterns = [os.path.join(k, "*") for k in folder_names]
allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name]
1
2023-11-14 23:29:31+00:00
16k
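`save_pretrained` in this record resolves, for every pipeline component, which save method to call by scanning `LOADABLE_CLASSES` for a base class the component belongs to. The core of that lookup, extracted as a sketch — the helper name is mine, and the real code additionally checks whether the save method accepts a `params` argument for Flax weights:

```py
import importlib

def find_save_method_name(component, loadable_classes):
    """Return e.g. 'save_pretrained' for the first matching base class."""
    for library_name, classes in loadable_classes.items():
        library = importlib.import_module(library_name)
        for base_name, (save_name, _load_name) in classes.items():
            base = getattr(library, base_name, None)
            if base is not None and isinstance(component, base):
                return save_name
    return None  # custom component: no registered save method
```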
BraveGroup/Drive-WM
tests/pipelines/controlnet/test_controlnet_sdxl.py
[ { "identifier": "IMAGE_TO_IMAGE_IMAGE_PARAMS", "path": "tests/pipelines/pipeline_params.py", "snippet": "IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset([\"image\"])" }, { "identifier": "TEXT_TO_IMAGE_BATCH_PARAMS", "path": "tests/pipelines/pipeline_params.py", "snippet": "TEXT_TO_IMAGE_BATCH_PA...
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, slow, torch_device from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, )
11,424
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

enable_full_determinism()


class StableDiffusionXLControlNetPipelineFastTests(
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    SDXLOptionalComponentsTesterMixin,
    unittest.TestCase,
):
    pipeline_class = StableDiffusionXLControlNetPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

enable_full_determinism()


class StableDiffusionXLControlNetPipelineFastTests(
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    SDXLOptionalComponentsTesterMixin,
    unittest.TestCase,
):
    pipeline_class = StableDiffusionXLControlNetPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
2
2023-11-18 01:40:55+00:00
16k
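`enable_full_determinism()` in this record pins every RNG the pipeline tests touch so fast tests can compare against golden output slices. Roughly what such a helper does — a hand-rolled approximation, not the diffusers implementation:

```py
import os
import random
import numpy as np
import torch

def seed_everything(seed: int = 0) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # required by cuBLAS for deterministic matmuls on CUDA
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    torch.use_deterministic_algorithms(True)
    torch.backends.cudnn.benchmark = False
```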
BAAI-DCAI/SegVol
inference_demo.py
[ { "identifier": "sam_model_registry", "path": "segment_anything_volumetric/build_sam.py", "snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):" }, { "identifier": "SegVol", "p...
import argparse import os import torch import torch.nn.functional as F import json import monai.transforms as transforms from segment_anything_volumetric import sam_model_registry from network.model import SegVol from data_process.demo_data_process import process_ct_gt from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor from utils.visualize import draw_result
11,422
# generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad(): logits_single_cropped = sliding_window_inference( image_single_cropped.cuda(), prompt_reflection, args.spatial_size, 1, segvol_model, args.infer_overlap, text=text_single, use_box=args.use_box_prompt, use_point=args.use_point_prompt, ) logits_single_cropped = logits_single_cropped.cpu().squeeze() logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] = logits_single_cropped zoom_in_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_in_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'===> zoom out dice {zoom_out_dice:.4f} -> zoom-out-zoom-in dice {zoom_in_dice:.4f} <===') 
return logits_labels_record def inference_single_ct(args, segvol_model, data_item, categories): segvol_model.eval() image, gt3D = data_item["image"].float(), data_item["label"] image_zoom_out, gt3D__zoom_out = data_item["zoom_out_image"].float(), data_item['zoom_out_label'] logits_labels_record = zoom_in_zoom_out( args, segvol_model, image.unsqueeze(0), image_zoom_out.unsqueeze(0), gt3D.unsqueeze(0), gt3D__zoom_out.unsqueeze(0), categories=categories) # visualize if args.visualize: for target, values in logits_labels_record.items(): dice_score, image, point_prompt, box_prompt, logits, labels = values print(f'{target} result with Dice score {dice_score:.4f} visualizing') draw_result(target + f"-Dice {dice_score:.4f}", image, box_prompt, point_prompt, logits, labels, args.spatial_size, args.work_dir) def main(args): gpu = 0 torch.cuda.set_device(gpu) # build model sam_model = sam_model_registry['vit'](args=args)
def set_parse(): # %% set up parser parser = argparse.ArgumentParser() parser.add_argument("--test_mode", default=True, type=bool) parser.add_argument("--resume", type = str, default = '') parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') ### demo parser.add_argument('--demo_config', type=str, required=True) parser.add_argument("--clip_ckpt", type = str, default = './config/clip') args = parser.parse_args() return args def dice_score(preds, labels): # on GPU assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape) predict = preds.view(1, -1) target = labels.view(1, -1) if target.shape[1] < 1e8: predict = predict.cuda() target = target.cuda() predict = torch.sigmoid(predict) predict = torch.where(predict > 0.5, 1., 0.) tp = torch.sum(torch.mul(predict, target)) den = torch.sum(predict) + torch.sum(target) + 1 dice = 2 * tp / den if target.shape[1] < 1e8: predict = predict.cpu() target = target.cpu() return dice def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None): logits_labels_record = {} image_single_resize = image_resize image_single = image[0,0] ori_shape = image_single.shape for item_idx in range(len(categories)): # get label to generate prompts label_single = gt3D[0][item_idx] label_single_resize = gt3D_resize[0][item_idx] # skip meaningless categories if torch.sum(label_single) == 0: print('No object, skip') continue # generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not 
args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad(): logits_single_cropped = sliding_window_inference( image_single_cropped.cuda(), prompt_reflection, args.spatial_size, 1, segvol_model, args.infer_overlap, text=text_single, use_box=args.use_box_prompt, use_point=args.use_point_prompt, ) logits_single_cropped = logits_single_cropped.cpu().squeeze() logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] = logits_single_cropped zoom_in_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_in_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'===> zoom out dice {zoom_out_dice:.4f} -> zoom-out-zoom-in dice {zoom_in_dice:.4f} <===') return logits_labels_record def inference_single_ct(args, segvol_model, data_item, categories): segvol_model.eval() image, gt3D = data_item["image"].float(), data_item["label"] image_zoom_out, gt3D__zoom_out = data_item["zoom_out_image"].float(), data_item['zoom_out_label'] logits_labels_record = zoom_in_zoom_out( args, segvol_model, image.unsqueeze(0), image_zoom_out.unsqueeze(0), gt3D.unsqueeze(0), gt3D__zoom_out.unsqueeze(0), categories=categories) # visualize if args.visualize: for target, values in logits_labels_record.items(): dice_score, image, point_prompt, box_prompt, logits, labels = values print(f'{target} result with Dice score {dice_score:.4f} visualizing') draw_result(target + f"-Dice {dice_score:.4f}", image, box_prompt, point_prompt, logits, labels, args.spatial_size, args.work_dir) def main(args): gpu = 0 torch.cuda.set_device(gpu) # build model sam_model = sam_model_registry['vit'](args=args)
segvol_model = SegVol(
1
2023-11-10 08:25:37+00:00
16k
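The SegVol record above scores zoom-out and zoom-in predictions with a sigmoid + hard-threshold Dice overlap. Below is a minimal, self-contained sketch of that `dice_score` logic; the CPU-only wrapper and the toy tensors are assumptions added for portability, not part of the record.

```python
# Hedged sketch of the record's dice_score: sigmoid, hard 0.5 threshold,
# then 2*TP / (|pred| + |gt| + 1) with +1 smoothing, as in the record.
# The CPU-only wrapper and fake tensors are assumptions for portability.
import torch

def dice_score_cpu(logits: torch.Tensor, target: torch.Tensor) -> float:
    predict = (torch.sigmoid(logits.reshape(1, -1)) > 0.5).float()
    target = target.reshape(1, -1).float()
    tp = (predict * target).sum()
    den = predict.sum() + target.sum() + 1  # +1 avoids division by zero
    return (2 * tp / den).item()

if __name__ == "__main__":
    pred = torch.randn(32, 64, 64)              # fake 3D logit volume
    gt = (torch.rand(32, 64, 64) > 0.5).long()  # fake binary label volume
    print(f"dice: {dice_score_cpu(pred, gt):.4f}")
```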
theroyallab/tabbyAPI
main.py
[ { "identifier": "convert_args_to_dict", "path": "args.py", "snippet": "def convert_args_to_dict(args: argparse.Namespace, parser: argparse.ArgumentParser):\n \"\"\"Broad conversion of surface level arg groups to dictionaries\"\"\"\n\n arg_groups = {}\n for group in parser._action_groups:\n ...
import pathlib import uvicorn import gen_logging from asyncio import CancelledError from typing import Optional from uuid import uuid4 from jinja2 import TemplateError from fastapi import FastAPI, Depends, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse from functools import partial from progress.bar import IncrementalBar from args import convert_args_to_dict, init_argparser from auth import check_admin_key, check_api_key, load_auth_keys from config import ( override_config_from_args, read_config_from_file, get_gen_logging_config, get_model_config, get_draft_model_config, get_lora_config, get_network_config, ) from generators import call_with_semaphore, generate_with_semaphore from model import ModelContainer from OAI.types.completion import CompletionRequest from OAI.types.chat_completion import ChatCompletionRequest from OAI.types.lora import LoraCard, LoraList, LoraLoadRequest, LoraLoadResponse from OAI.types.model import ( ModelCard, ModelLoadRequest, ModelLoadResponse, ModelCardParameters, ) from OAI.types.template import TemplateList from OAI.types.token import ( TokenEncodeRequest, TokenEncodeResponse, TokenDecodeRequest, TokenDecodeResponse, ) from OAI.utils_oai import ( create_completion_response, get_model_list, get_lora_list, create_chat_completion_response, create_chat_completion_stream_chunk, ) from templating import get_all_templates, get_prompt_from_template from utils import get_generator_error, get_sse_packet, load_progress, unwrap from logger import init_logger
14216
success=unwrap(result.get("success"), []), failure=unwrap(result.get("failure"), []), ) # Unload lora endpoint @app.post( "/v1/lora/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_loras(): """Unloads the currently loaded loras.""" MODEL_CONTAINER.unload(True) # Encode tokens endpoint @app.post( "/v1/token/encode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def encode_tokens(data: TokenEncodeRequest): """Encodes a string into tokens.""" raw_tokens = MODEL_CONTAINER.get_tokens(data.text, None, **data.get_params()) # Have to use this if check otherwise Torch's tensors error out # with a boolean issue tokens = raw_tokens[0].tolist() if raw_tokens is not None else [] response = TokenEncodeResponse(tokens=tokens, length=len(tokens)) return response # Decode tokens endpoint @app.post( "/v1/token/decode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def decode_tokens(data: TokenDecodeRequest): """Decodes tokens into a string.""" message = MODEL_CONTAINER.get_tokens(None, data.tokens, **data.get_params()) response = TokenDecodeResponse(text=unwrap(message, "")) return response # Completions endpoint @app.post( "/v1/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_completion(request: Request, data: CompletionRequest): """Generates a completion from a prompt.""" model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.prompt, list): data.prompt = "\n".join(data.prompt) if data.stream: async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( data.prompt, **data.to_gen_params() ) for part, prompt_tokens, completion_tokens in new_generation: if await request.is_disconnected(): break response = create_completion_response( part, prompt_tokens, completion_tokens, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation yield get_sse_packet("[DONE]") except CancelledError: logger.error("Completion request cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, data.prompt, **data.to_gen_params()) ) response = create_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response # Chat completions endpoint @app.post( "/v1/chat/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_chat_completion(request: Request, data: ChatCompletionRequest): """Generates a chat completion from a prompt.""" if MODEL_CONTAINER.prompt_template is None: raise HTTPException( 422, "This endpoint is disabled because a prompt template is not set.", ) model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.messages, str): prompt = data.messages else: try: special_tokens_dict = MODEL_CONTAINER.get_special_tokens( unwrap(data.add_bos_token, True), unwrap(data.ban_eos_token, False), )
"""The main tabbyAPI module. Contains the FastAPI server and endpoints.""" logger = init_logger(__name__) app = FastAPI( title="TabbyAPI", summary="An OAI compatible exllamav2 API that's both lightweight and fast", description=( "This docs page is not meant to send requests! Please use a service " "like Postman or a frontend UI." ), ) # Globally scoped variables. Undefined until initalized in main MODEL_CONTAINER: Optional[ModelContainer] = None def _check_model_container(): if MODEL_CONTAINER is None or MODEL_CONTAINER.model is None: raise HTTPException(400, "No models are loaded.") # ALlow CORS requests app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Model list endpoint @app.get("/v1/models", dependencies=[Depends(check_api_key)]) @app.get("/v1/model/list", dependencies=[Depends(check_api_key)]) async def list_models(): """Lists all models in the model directory.""" model_config = get_model_config() model_dir = unwrap(model_config.get("model_dir"), "models") model_path = pathlib.Path(model_dir) draft_model_dir = get_draft_model_config().get("draft_model_dir") models = get_model_list(model_path.resolve(), draft_model_dir) if unwrap(model_config.get("use_dummy_models"), False): models.data.insert(0, ModelCard(id="gpt-3.5-turbo")) return models # Currently loaded model endpoint @app.get( "/v1/model", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) @app.get( "/v1/internal/model/info", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_current_model(): """Returns the currently loaded model.""" model_name = MODEL_CONTAINER.get_model_path().name prompt_template = MODEL_CONTAINER.prompt_template model_card = ModelCard( id=model_name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.config.max_seq_len, cache_mode="FP8" if MODEL_CONTAINER.cache_fp8 else "FP16", prompt_template=prompt_template.name if prompt_template else None, num_experts_per_token=MODEL_CONTAINER.config.num_experts_per_token, use_cfg=MODEL_CONTAINER.use_cfg, ), logging=gen_logging.PREFERENCES, ) if MODEL_CONTAINER.draft_config: draft_card = ModelCard( id=MODEL_CONTAINER.get_model_path(True).name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.draft_config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.draft_config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.draft_config.max_seq_len, ), ) model_card.parameters.draft = draft_card return model_card @app.get("/v1/model/draft/list", dependencies=[Depends(check_api_key)]) async def list_draft_models(): """Lists all draft models in the model directory.""" draft_model_dir = unwrap(get_draft_model_config().get("draft_model_dir"), "models") draft_model_path = pathlib.Path(draft_model_dir) models = get_model_list(draft_model_path.resolve()) return models # Load model endpoint @app.post("/v1/model/load", dependencies=[Depends(check_admin_key)]) async def load_model(request: Request, data: ModelLoadRequest): """Loads a model into the model container.""" global MODEL_CONTAINER if MODEL_CONTAINER and MODEL_CONTAINER.model: raise HTTPException(400, "A model is already loaded! 
Please unload it first.") if not data.name: raise HTTPException(400, "model_name not found.") model_path = pathlib.Path(unwrap(get_model_config().get("model_dir"), "models")) model_path = model_path / data.name load_data = data.model_dump() if data.draft: if not data.draft.draft_model_name: raise HTTPException( 400, "draft_model_name was not found inside the draft object." ) load_data["draft"]["draft_model_dir"] = unwrap( get_draft_model_config().get("draft_model_dir"), "models" ) if not model_path.exists(): raise HTTPException(400, "model_path does not exist. Check model_name?") MODEL_CONTAINER = ModelContainer(model_path.resolve(), False, **load_data) async def generator(): """Generator for the loading process.""" model_type = "draft" if MODEL_CONTAINER.draft_config else "model" load_status = MODEL_CONTAINER.load_gen(load_progress) try: for module, modules in load_status: if await request.is_disconnected(): break if module == 0: loading_bar: IncrementalBar = IncrementalBar("Modules", max=modules) elif module == modules: loading_bar.next() loading_bar.finish() response = ModelLoadResponse( model_type=model_type, module=module, modules=modules, status="finished", ) yield get_sse_packet(response.model_dump_json()) # Switch to model progress if the draft model is loaded if MODEL_CONTAINER.draft_config: model_type = "model" else: loading_bar.next() response = ModelLoadResponse( model_type=model_type, module=module, modules=modules, status="processing", ) yield get_sse_packet(response.model_dump_json()) except CancelledError: logger.error( "Model load cancelled by user. " "Please make sure to run unload to free up resources." ) except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse(generator(), media_type="text/event-stream") # Unload model endpoint @app.post( "/v1/model/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_model(): """Unloads the currently loaded model.""" global MODEL_CONTAINER MODEL_CONTAINER.unload() MODEL_CONTAINER = None @app.get("/v1/templates", dependencies=[Depends(check_api_key)]) @app.get("/v1/template/list", dependencies=[Depends(check_api_key)]) async def get_templates(): templates = get_all_templates() template_strings = list(map(lambda template: template.stem, templates)) return TemplateList(data=template_strings) # Lora list endpoint @app.get("/v1/loras", dependencies=[Depends(check_api_key)]) @app.get("/v1/lora/list", dependencies=[Depends(check_api_key)]) async def get_all_loras(): """Lists all LoRAs in the lora directory.""" lora_path = pathlib.Path(unwrap(get_lora_config().get("lora_dir"), "loras")) loras = get_lora_list(lora_path.resolve()) return loras # Currently loaded loras endpoint @app.get( "/v1/lora", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_active_loras(): """Returns the currently loaded loras.""" active_loras = LoraList( data=list( map( lambda lora: LoraCard( id=pathlib.Path(lora.lora_path).parent.name, scaling=lora.lora_scaling * lora.lora_r / lora.lora_alpha, ), MODEL_CONTAINER.active_loras, ) ) ) return active_loras # Load lora endpoint @app.post( "/v1/lora/load", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def load_lora(data: LoraLoadRequest): """Loads a LoRA into the model container.""" if not data.loras: raise HTTPException(400, "List of loras to load is not found.") lora_dir = pathlib.Path(unwrap(get_lora_config().get("lora_dir"), "loras")) if not lora_dir.exists(): raise 
HTTPException( 400, "A parent lora directory does not exist. Check your config.yml?", ) # Clean-up existing loras if present if len(MODEL_CONTAINER.active_loras) > 0: MODEL_CONTAINER.unload(True) result = MODEL_CONTAINER.load_loras(lora_dir, **data.model_dump()) return LoraLoadResponse( success=unwrap(result.get("success"), []), failure=unwrap(result.get("failure"), []), ) # Unload lora endpoint @app.post( "/v1/lora/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_loras(): """Unloads the currently loaded loras.""" MODEL_CONTAINER.unload(True) # Encode tokens endpoint @app.post( "/v1/token/encode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def encode_tokens(data: TokenEncodeRequest): """Encodes a string into tokens.""" raw_tokens = MODEL_CONTAINER.get_tokens(data.text, None, **data.get_params()) # Have to use this if check otherwise Torch's tensors error out # with a boolean issue tokens = raw_tokens[0].tolist() if raw_tokens is not None else [] response = TokenEncodeResponse(tokens=tokens, length=len(tokens)) return response # Decode tokens endpoint @app.post( "/v1/token/decode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def decode_tokens(data: TokenDecodeRequest): """Decodes tokens into a string.""" message = MODEL_CONTAINER.get_tokens(None, data.tokens, **data.get_params()) response = TokenDecodeResponse(text=unwrap(message, "")) return response # Completions endpoint @app.post( "/v1/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_completion(request: Request, data: CompletionRequest): """Generates a completion from a prompt.""" model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.prompt, list): data.prompt = "\n".join(data.prompt) if data.stream: async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( data.prompt, **data.to_gen_params() ) for part, prompt_tokens, completion_tokens in new_generation: if await request.is_disconnected(): break response = create_completion_response( part, prompt_tokens, completion_tokens, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation yield get_sse_packet("[DONE]") except CancelledError: logger.error("Completion request cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, data.prompt, **data.to_gen_params()) ) response = create_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response # Chat completions endpoint @app.post( "/v1/chat/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_chat_completion(request: Request, data: ChatCompletionRequest): """Generates a chat completion from a prompt.""" if MODEL_CONTAINER.prompt_template is None: raise HTTPException( 422, "This endpoint is disabled because a prompt template is not set.", ) model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.messages, str): prompt = data.messages else: try: special_tokens_dict = MODEL_CONTAINER.get_special_tokens( unwrap(data.add_bos_token, True), unwrap(data.ban_eos_token, False), )
prompt = get_prompt_from_template(
36
2023-11-10 05:54:02+00:00
16k
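The tabbyAPI record streams completion tokens as server-sent events and aborts generation when the client disconnects. A minimal sketch of that pattern follows; `fake_token_stream` and the JSON packet shape are illustrative stand-ins for tabbyAPI's `MODEL_CONTAINER.generate_gen` and `get_sse_packet`, not the project's actual API.

```python
# Hedged sketch of the SSE streaming pattern in the record: an async
# generator yields "data: ..." packets, checks request.is_disconnected(),
# and ends with an OpenAI-style [DONE] terminator.
import asyncio
import json
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse

app = FastAPI()

async def fake_token_stream():
    # Stand-in for the model container's token generator (assumption).
    for tok in ["Hello", " ", "world", "!"]:
        await asyncio.sleep(0.05)  # simulate generation latency
        yield tok

@app.post("/v1/completions")
async def completions(request: Request):
    async def generator():
        async for tok in fake_token_stream():
            if await request.is_disconnected():  # client went away: stop early
                break
            yield f"data: {json.dumps({'text': tok})}\n\n"
        yield "data: [DONE]\n\n"  # finish packet, as in the record
    return StreamingResponse(generator(), media_type="text/event-stream")
```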
ShipBit/wingman-ai
services/tower.py
[ { "identifier": "MissingApiKeyException", "path": "exceptions.py", "snippet": "class MissingApiKeyException(Exception):\n pass" }, { "identifier": "OpenAiWingman", "path": "wingmen/open_ai_wingman.py", "snippet": "class OpenAiWingman(Wingman):\n \"\"\"Our OpenAI Wingman base gives ...
import copy from exceptions import MissingApiKeyException from wingmen.open_ai_wingman import OpenAiWingman from wingmen.wingman import Wingman from services.printr import Printr from services.secret_keeper import SecretKeeper
12682
printr = Printr() class Tower: def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str): # type: ignore self.config = config self.app_root_dir = app_root_dir self.secret_keeper = secret_keeper self.key_wingman_dict: dict[str, Wingman] = {} self.broken_wingmen = [] self.wingmen = self.__instantiate_wingmen() self.key_wingman_dict: dict[str, Wingman] = {} for wingman in self.wingmen: self.key_wingman_dict[wingman.get_record_key()] = wingman def __instantiate_wingmen(self) -> list[Wingman]: wingmen = [] for wingman_name, wingman_config in self.config["wingmen"].items(): if wingman_config.get("disabled") is True: continue global_config = { "sound": self.config.get("sound", {}), "openai": self.config.get("openai", {}), "features": self.config.get("features", {}), "edge_tts": self.config.get("edge_tts", {}), "commands": self.config.get("commands", {}), "elevenlabs": self.config.get("elevenlabs", {}), "azure": self.config.get("azure", {}), } merged_config = self.__merge_configs(global_config, wingman_config) class_config = merged_config.get("class") wingman = None # it's a custom Wingman try: if class_config: kwargs = class_config.get("args", {}) wingman = Wingman.create_dynamically( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, module_path=class_config.get("module"), class_name=class_config.get("name"), app_root_dir=self.app_root_dir, **kwargs ) else: wingman = OpenAiWingman( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, app_root_dir=self.app_root_dir, )
printr = Printr() class Tower: def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str): # type: ignore self.config = config self.app_root_dir = app_root_dir self.secret_keeper = secret_keeper self.key_wingman_dict: dict[str, Wingman] = {} self.broken_wingmen = [] self.wingmen = self.__instantiate_wingmen() self.key_wingman_dict: dict[str, Wingman] = {} for wingman in self.wingmen: self.key_wingman_dict[wingman.get_record_key()] = wingman def __instantiate_wingmen(self) -> list[Wingman]: wingmen = [] for wingman_name, wingman_config in self.config["wingmen"].items(): if wingman_config.get("disabled") is True: continue global_config = { "sound": self.config.get("sound", {}), "openai": self.config.get("openai", {}), "features": self.config.get("features", {}), "edge_tts": self.config.get("edge_tts", {}), "commands": self.config.get("commands", {}), "elevenlabs": self.config.get("elevenlabs", {}), "azure": self.config.get("azure", {}), } merged_config = self.__merge_configs(global_config, wingman_config) class_config = merged_config.get("class") wingman = None # it's a custom Wingman try: if class_config: kwargs = class_config.get("args", {}) wingman = Wingman.create_dynamically( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, module_path=class_config.get("module"), class_name=class_config.get("name"), app_root_dir=self.app_root_dir, **kwargs ) else: wingman = OpenAiWingman( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, app_root_dir=self.app_root_dir, )
except MissingApiKeyException:
0
2023-11-15 09:36:06+00:00
16k
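In the wingman-ai record, `Tower.__instantiate_wingmen` merges a shared global config section into each wingman's own config before instantiation. `__merge_configs` itself is not shown in the record, so the deep merge below is an assumed reconstruction in which wingman-level keys override global ones.

```python
# Hedged sketch of a per-wingman config merge (assumption: recursive
# deep merge, wingman-specific values win over the shared global section).
from copy import deepcopy

def merge_configs(global_cfg: dict, wingman_cfg: dict) -> dict:
    merged = deepcopy(global_cfg)
    for key, value in wingman_cfg.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_configs(merged[key], value)  # recurse into sections
        else:
            merged[key] = value  # wingman value overrides outright
    return merged

if __name__ == "__main__":
    global_cfg = {"sound": {"volume": 0.8}, "openai": {"model": "gpt-4"}}
    wingman_cfg = {"openai": {"model": "gpt-3.5-turbo"}, "name": "ATC"}
    print(merge_configs(global_cfg, wingman_cfg))
    # {'sound': {'volume': 0.8}, 'openai': {'model': 'gpt-3.5-turbo'}, 'name': 'ATC'}
```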
wjun0830/CGDETR
cg_detr/inference.py
[ { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n s...
import pprint import numpy as np import os import torch import torch.nn.functional as F import torch.backends.cudnn as cudnn import logging from tqdm import tqdm, trange from collections import OrderedDict, defaultdict from utils.basic_utils import AverageMeter from torch.utils.data import DataLoader from cg_detr.config import TestOptions from cg_detr.model import build_model from cg_detr.span_utils import span_cxw_to_xx from cg_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.postprocessing_cg_detr import PostProcessorDETR from standalone_eval.eval import eval_submission from utils.basic_utils import save_jsonl, save_json from utils.temporal_nms import temporal_nms from collections import OrderedDict from sys import argv
11379
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json") save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False) latest_file_paths = [submission_path, save_metrics_path] else: metrics = None latest_file_paths = [submission_path, ] if opt.nms_thd != -1: logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd)) submission_after_nms = post_processing_mr_nms( submission, nms_thd=opt.nms_thd, max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms ) logger.info("Saving/Evaluating nms results") submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd)) save_jsonl(submission_after_nms, submission_nms_path) if opt.eval_split_name == "val": metrics_nms = eval_submission( submission_after_nms, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json") save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False) latest_file_paths += [submission_nms_path, save_metrics_nms_path] else: metrics_nms = None latest_file_paths = [submission_nms_path, ] else: metrics_nms = None return metrics, metrics_nms, latest_file_paths # for HL @torch.no_grad() def compute_hl_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): model.eval() if criterion: assert eval_loader.dataset.load_labels criterion.eval() loss_meters = defaultdict(AverageMeter) write_tb = tb_writer is not None and epoch_i is not None mr_res = [] topk = 5 # top-5 map video_ap_collected = [] for batch in tqdm(eval_loader, desc="compute st ed scores"): query_meta = batch[0]
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json") save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False) latest_file_paths = [submission_path, save_metrics_path] else: metrics = None latest_file_paths = [submission_path, ] if opt.nms_thd != -1: logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd)) submission_after_nms = post_processing_mr_nms( submission, nms_thd=opt.nms_thd, max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms ) logger.info("Saving/Evaluating nms results") submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd)) save_jsonl(submission_after_nms, submission_nms_path) if opt.eval_split_name == "val": metrics_nms = eval_submission( submission_after_nms, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json") save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False) latest_file_paths += [submission_nms_path, save_metrics_nms_path] else: metrics_nms = None latest_file_paths = [submission_nms_path, ] else: metrics_nms = None return metrics, metrics_nms, latest_file_paths # for HL @torch.no_grad() def compute_hl_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): model.eval() if criterion: assert eval_loader.dataset.load_labels criterion.eval() loss_meters = defaultdict(AverageMeter) write_tb = tb_writer is not None and epoch_i is not None mr_res = [] topk = 5 # top-5 map video_ap_collected = [] for batch in tqdm(eval_loader, desc="compute st ed scores"): query_meta = batch[0]
model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory)
6
2023-11-10 12:45:25+00:00
16k
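`post_processing_mr_nms` in the CGDETR record suppresses overlapping moment predictions with `utils.temporal_nms` over `[start, end, score]` windows. That helper is not shown in the record, so the greedy IoU suppression below is an assumed reconstruction of the standard technique, with the window format inferred from the calling code.

```python
# Hedged sketch of greedy temporal NMS over [start, end, score] windows:
# keep the highest-scoring window, drop any later window whose temporal
# IoU with a kept one reaches nms_thd, and cap the result at max_after_nms.
def temporal_iou(a, b):
    inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
    union = (a[1] - a[0]) + (b[1] - b[0]) - inter
    return inter / union if union > 0 else 0.0

def temporal_nms(windows, nms_thd=0.5, max_after_nms=10):
    windows = sorted(windows, key=lambda w: w[2], reverse=True)  # by score
    kept = []
    for w in windows:
        if all(temporal_iou(w, k) < nms_thd for k in kept):
            kept.append(w)
        if len(kept) >= max_after_nms:
            break
    return kept

if __name__ == "__main__":
    preds = [[0.0, 10.0, 0.9], [1.0, 9.0, 0.8], [20.0, 30.0, 0.7]]
    print(temporal_nms(preds, nms_thd=0.5))  # middle window is suppressed
```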
zhang-tao-whu/DVIS_Plus
ov_dvis/meta_architecture_ov.py
[ { "identifier": "VideoSetCriterion_ov", "path": "mask2former_video/modeling/criterion.py", "snippet": "class VideoSetCriterion_ov(VideoSetCriterion):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth ...
import logging import einops import torch from typing import Tuple from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.data import MetadataCatalog from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head from detectron2.modeling.backbone import Backbone from detectron2.structures import Boxes, ImageList, Instances, BitMasks from mask2former_video.modeling.criterion import VideoSetCriterion_ov from mask2former_video.modeling.matcher import VideoHungarianMatcher, VideoHungarianMatcher_Consistent from mask2former_video.utils.memory import retry_if_cuda_oom from scipy.optimize import linear_sum_assignment from .video_dvis_modules_ov import ReferringTracker_noiser_OV, TemporalRefiner_OV from .video_mask2former_transformer_decoder_ov import MaskPooling from dvis_Plus.utils import loss_reid
13492
*, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, train_metadatas: dict, test_metadatas: dict, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video tracker, num_frames, window_inference, max_num, max_iter_num, window_size, task, # fc-clip geometric_ensemble_alpha: float, geometric_ensemble_beta: float, ensemble_on_valid_mask: bool, # multi datasets test2train={}, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image # video tracker: a tracker module, e.g. ReferringTracker num_frames: number of frames sampled during training window_inference: if the GPU memory is insufficient to predict the entire video at once, inference needs to be performed clip by clip num_class: the categories number of the dataset max_num: the maximum number of instances retained for a video, only used in VIS max_iter_num: the iter nums window_size: the number of images processed by the segmenter at a time task: VIS, VSS or VPS """ super().__init__( backbone=backbone, sem_seg_head=sem_seg_head, criterion=criterion, num_queries=num_queries, object_mask_threshold=object_mask_threshold, overlap_threshold=overlap_threshold, train_metadatas=train_metadatas, test_metadatas=test_metadatas, size_divisibility=size_divisibility, sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference, pixel_mean=pixel_mean, pixel_std=pixel_std, # video num_frames=num_frames, window_inference=window_inference, # dc clip geometric_ensemble_alpha=geometric_ensemble_alpha, geometric_ensemble_beta=geometric_ensemble_beta, ensemble_on_valid_mask=ensemble_on_valid_mask, # multi datasets test2train=test2train, ) # frozen the void classifier for p in self.void_embedding.parameters(): p.requires_grad_(False) # frozen the segmenter for p in self.backbone.parameters(): p.requires_grad_(False) for p in self.sem_seg_head.parameters(): p.requires_grad_(False) self.tracker = tracker self.max_num = max_num self.iter = 0 self.max_iter_num = max_iter_num self.window_size = window_size self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" 
inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video_task = inference_dict[self.task] @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
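The cropped code of the DVIS_Plus record stops right at the criterion-building step; the full file later shows the usual Mask2Former-style deep-supervision expansion, where every auxiliary decoder layer gets an `_i`-suffixed copy of the top-level loss weights. A minimal sketch of that expansion; the numeric weights and layer count are assumed stand-ins for the cfg values.

```python
# Hedged sketch of the deep-supervision weight-dict expansion visible later
# in the record: each auxiliary decoder layer i receives "_i"-suffixed
# copies of the loss weights. Weight values and dec_layers are assumptions.
def build_weight_dict(class_weight=2.0, mask_weight=5.0, dice_weight=5.0, dec_layers=10):
    weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}
    aux = {}
    for i in range(dec_layers - 1):
        aux.update({f"{k}_{i}": v for k, v in weight_dict.items()})
    weight_dict.update(aux)
    return weight_dict

if __name__ == "__main__":
    wd = build_weight_dict()
    print(len(wd))  # 3 losses * 10 layers = 30 weighted terms
```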
logger = logging.getLogger(__name__) VILD_PROMPT = [ "a photo of a {}.", "This is a photo of a {}", "There is a {} in the scene", "There is the {} in the scene", "a photo of a {} in the scene", "a photo of a small {}.", "a photo of a medium {}.", "a photo of a large {}.", "This is a photo of a small {}.", "This is a photo of a medium {}.", "This is a photo of a large {}.", "There is a small {} in the scene.", "There is a medium {} in the scene.", "There is a large {} in the scene.", ] def get_classification_logits(x, text_classifier, logit_scale, num_templates=None): x = F.normalize(x, dim=-1) logit_scale = torch.clamp(logit_scale.exp(), max=100) pred_logits = logit_scale * x @ text_classifier.T # B, *, N + 1 # max ensembel as in OpenSeg/ODISE final_pred_logits = [] cur_idx = 0 for num_t in num_templates[:-1]: final_pred_logits.append(pred_logits[:, :, cur_idx: cur_idx + num_t].max(-1).values) cur_idx += num_t # final_pred_logits.append(pred_logits[:, :, -1]) # the last classifier is for void final_pred_logits.append(pred_logits[:, :, -num_templates[-1]:].max(-1).values) final_pred_logits = torch.stack(final_pred_logits, dim=-1) return final_pred_logits @META_ARCH_REGISTRY.register() class MinVIS_OV(nn.Module): @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, train_metadatas: dict, test_metadatas: dict, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video num_frames, window_inference, # fc-clip geometric_ensemble_alpha: float, geometric_ensemble_beta: float, ensemble_on_valid_mask: bool, # multi datasets test2train={}, task='vis', ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. 
pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image test2train: dict, which void embedding to use """ super().__init__() self.backbone = backbone for p in self.backbone.parameters(): p.requires_grad_(False) self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = train_metadatas self.test_metadata = test_metadatas if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) self.num_frames = num_frames self.window_inference = window_inference # FC-CLIP args self.mask_pooling = MaskPooling() self.geometric_ensemble_alpha = geometric_ensemble_alpha self.geometric_ensemble_beta = geometric_ensemble_beta self.ensemble_on_valid_mask = ensemble_on_valid_mask self.train_text_classifier = None self.test_text_classifier = None self.train_num_templates = None self.test_num_templates = None self.category_overlapping_mask = None self.train_text_classifier_dict = {} self.test_text_classifier_dict = {} self.train_num_templates_dict = {} self.test_num_templates_dict = {} self.test_num_templates_dict = {} self.void_embedding = nn.Embedding(1, backbone.dim_latent) # use this for void # init private void embedding for each dataset if len(train_metadatas) - 1 > 0: self.additional_void_embedding = nn.Embedding(len(train_metadatas) - 1, backbone.dim_latent) else: self.additional_void_embedding = None self.train_class_prepares = {} self.train_names2id = {} self.test_class_prepares = {} for i, name in enumerate(train_metadatas.keys()): self.train_names2id[name] = i train_metadata = train_metadatas[name] _, train_num_templates, train_class_names = self.prepare_class_names_from_metadata(train_metadata, train_metadata) self.train_class_prepares.update({name: {'num_templates': train_num_templates, 'class_names': train_class_names}}) all_train_metadatas = [train_metadatas[key] for key in train_metadatas.keys()] self.all_train_metadatas = all_train_metadatas for name in test_metadatas.keys(): test_metadata = test_metadatas[name] category_overlapping_mask, test_num_templates, test_class_names = self.prepare_class_names_from_metadata( test_metadata, all_train_metadatas) self.test_class_prepares.update({name: {'overlapping': category_overlapping_mask, 'num_templates': test_num_templates, 'class_names': test_class_names}}) self.test2train = test2train self.test_use_all_vocabulary = False self.void_embedding_merge_mode = 'coco' # ['mean', 'max', 'coco'] self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" 
inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video = inference_dict[self.task] def get_text_classifier_with_void(self, text_classifier, num_templates, name): def split_labels(x): res = [] for x_ in x: x_ = x_.replace(', ', ',') x_ = x_.split(',') # there can be multiple synonyms for single class res.append(x_) return res if self.training or not self.test_use_all_vocabulary: if self.additional_void_embedding is None: _zero = self.void_embedding.weight.sum() * 0.0 else: _zero = self.void_embedding.weight.sum() * 0.0 + self.additional_void_embedding.weight.sum() * 0.0 if name in self.train_names2id.keys(): i = self.train_names2id[name] if i == 0: void_embed = self.void_embedding.weight else: void_embed = self.additional_void_embedding.weight[i - 1: i] void_embed = F.normalize(void_embed, dim=-1) + _zero else: if self.additional_void_embedding is None: void_embed = self.void_embedding.weight void_embed = F.normalize(void_embed, dim=-1) else: void_embed = torch.cat([self.void_embedding.weight, self.additional_void_embedding.weight], dim=0) void_embed = F.normalize(void_embed, dim=-1).detach() if self.void_embedding_merge_mode == 'mean': void_embed = torch.mean(void_embed, dim=0, keepdim=True) elif self.void_embedding_merge_mode == 'max': pass elif self.void_embedding_merge_mode == 'coco': void_embed = void_embed[:1] else: raise NotImplementedError text_classifier = torch.cat([text_classifier, void_embed], dim=0) num_templates = num_templates + [void_embed.shape[0]] return text_classifier, num_templates else: # print("using additional vocabulary !!!") class_names = split_labels(self.test_metadata[name].classes_ov) # it includes both thing and stuff if isinstance(self.all_train_metadatas, list): train_classes = [] for item in self.all_train_metadatas: train_classes += item.classes_ov if len(train_classes) != 0: train_class_names = split_labels(train_classes) else: raise NotImplementedError else: train_class_names = split_labels(self.all_train_metadatas.classes_ov) test_class_names = {l for label in class_names for l in label} # train_class_names = {l for label in train_class_names for l in label} train2test_category_overlapping_list = [] for train_class_name in train_class_names: not_overlapping = set(train_class_name).isdisjoint(set(test_class_names)) train2test_category_overlapping_list.extend([not_overlapping] * len(train_class_name)) train2test_category_overlapping_list = torch.tensor( train2test_category_overlapping_list, dtype=torch.bool) train_classifiers = [] for key in self.metadata.keys(): if key not in self.train_text_classifier_dict.keys(): self._set_class_information(key, train=True) train_classifiers.append(self.train_text_classifier_dict[key]) train_classifiers = torch.cat(train_classifiers, dim=0)[train2test_category_overlapping_list] if name in self.test2train.keys(): i = self.train_names2id[self.test2train[name]] if i == 0: void_embed = self.void_embedding.weight else: void_embed = self.additional_void_embedding.weight[i - 1: i] void_embed = F.normalize(void_embed, dim=-1) else: if self.additional_void_embedding is not None: void_embed = torch.cat([self.void_embedding.weight, self.additional_void_embedding.weight], dim=0) void_embed = F.normalize(void_embed, dim=-1).detach() if self.void_embedding_merge_mode == 'mean': void_embed = torch.mean(void_embed, dim=0, keepdim=True) elif self.void_embedding_merge_mode == 'max': pass elif self.void_embedding_merge_mode == 'coco': void_embed = 
void_embed[:1] else: raise NotImplementedError else: void_embed = self.void_embedding.weight void_embed = F.normalize(void_embed, dim=-1) text_classifier = torch.cat([text_classifier, void_embed, train_classifiers], dim=0) num_templates = num_templates + [len(void_embed) + len(train_classifiers)] return text_classifier, num_templates def _set_class_information(self, name, train=True): self.name = name if train: if name in self.train_text_classifier_dict.keys(): return self.train_text_classifier_dict[name], self.train_num_templates_dict[name] else: infos = self.train_class_prepares[name] self.train_num_templates = infos['num_templates'] self.train_class_names = infos['class_names'] self.train_text_classifier = None self.train_text_classifier, self.train_num_templates = self.get_text_classifier(train=train) self.train_text_classifier_dict[name] = self.train_text_classifier self.train_num_templates_dict[name] = self.train_num_templates return self.train_text_classifier, self.train_num_templates else: self.category_overlapping_mask = self.test_class_prepares[name]['overlapping'] if name in self.test_text_classifier_dict.keys(): return self.test_text_classifier_dict[name], self.test_num_templates_dict[name] infos = self.test_class_prepares[name] self.category_overlapping_mask = infos['overlapping'] self.test_num_templates = infos['num_templates'] self.test_class_names = infos['class_names'] self.test_text_classifier = None self.test_text_classifier, self.test_num_templates = self.get_text_classifier(train=train) self.test_text_classifier_dict[name] = self.test_text_classifier self.test_num_templates_dict[name] = self.test_num_templates return self.test_text_classifier, self.test_num_templates def set_metadata(self, name, metadata): print(metadata.classes_ov) self.category_overlapping_mask, self.test_num_templates, self.test_class_names = \ self.prepare_class_names_from_metadata(metadata, self.all_train_metadatas) self.test_class_prepares.update({name: {'overlapping': self.category_overlapping_mask, 'num_templates': self.test_num_templates, 'class_names': self.test_class_names}}) if name in self.test_text_classifier_dict.keys(): del self.test_text_classifier_dict[name] self.test_text_classifier = None return def get_text_classifier(self, train=False): if self.training or train: if self.train_text_classifier is None: text_classifier = [] # this is needed to avoid oom, which may happen when num of class is large bs = 128 for idx in range(0, len(self.train_class_names), bs): text_classifier.append(self.backbone.get_text_classifier(self.train_class_names[idx:idx+bs], self.device).detach()) text_classifier = torch.cat(text_classifier, dim=0) # get per text embedding for per class template # average across templates and normalization. 
text_classifier /= text_classifier.norm(dim=-1, keepdim=True) text_classifier = text_classifier.reshape(text_classifier.shape[0]//len(VILD_PROMPT), len(VILD_PROMPT), text_classifier.shape[-1]).mean(1) text_classifier /= text_classifier.norm(dim=-1, keepdim=True) self.train_text_classifier = text_classifier # self.train_text_classifier, per component templates # self.train_num_templates, per class have how many components return self.train_text_classifier, self.train_num_templates else: if self.test_text_classifier is None: text_classifier = [] # this is needed to avoid oom, which may happen when num of class is large bs = 128 for idx in range(0, len(self.test_class_names), bs): text_classifier.append(self.backbone.get_text_classifier(self.test_class_names[idx:idx+bs], self.device).detach()) text_classifier = torch.cat(text_classifier, dim=0) # average across templates and normalization. text_classifier /= text_classifier.norm(dim=-1, keepdim=True) text_classifier = text_classifier.reshape(text_classifier.shape[0]//len(VILD_PROMPT), len(VILD_PROMPT), text_classifier.shape[-1]).mean(1) text_classifier /= text_classifier.norm(dim=-1, keepdim=True) self.test_text_classifier = text_classifier return self.test_text_classifier, self.test_num_templates def prepare_class_names_from_metadata(self, metadata, train_metadata): def split_labels(x): res = [] for x_ in x: x_ = x_.replace(', ', ',') x_ = x_.split(',') # there can be multiple synonyms for single class res.append(x_) return res # get text classifier try: class_names = split_labels(metadata.classes_ov) # it includes both thing and stuff if isinstance(train_metadata, list): train_classes = [] for item in train_metadata: train_classes += item.classes_ov if len(train_classes) != 0: train_class_names = split_labels(train_classes) else: raise NotImplementedError else: train_class_names = split_labels(train_metadata.classes_ov) except: # this could be for insseg, where only thing_classes are available class_names = split_labels(metadata.thing_classes_ov) if isinstance(train_metadata, list): train_thing_classes = [] for item in train_metadata: train_thing_classes += item.thing_classes_ov train_class_names = split_labels(train_thing_classes) else: train_class_names = split_labels(train_metadata.thing_classes_ov) train_class_names = {l for label in train_class_names for l in label} category_overlapping_list = [] for test_class_names in class_names: is_overlapping = not set(train_class_names).isdisjoint(set(test_class_names)) category_overlapping_list.append(is_overlapping) category_overlapping_mask = torch.tensor( category_overlapping_list, dtype=torch.long) def fill_all_templates_ensemble(x_=''): res = [] for x in x_: for template in VILD_PROMPT: res.append(template.format(x)) return res, len(res) // len(VILD_PROMPT) num_templates = [] templated_class_names = [] for x in class_names: templated_classes, templated_classes_num = fill_all_templates_ensemble(x) templated_class_names += templated_classes num_templates.append(templated_classes_num) # how many templates for current classes class_names = templated_class_names # category_overlapping_mask (N_train, ) # num_templates, [num_per_class_name, ], num of cur class is splited to how many components # class_names, [per_class_template, ], per_class_template [N_comp * N_template] return category_overlapping_mask, num_templates, class_names @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: 
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion matcher = VideoHungarianMatcher( cost_class=class_weight, cost_mask=mask_weight, cost_dice=dice_weight, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, ) weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight} if deep_supervision: dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS aux_weight_dict = {} for i in range(dec_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) losses = ["labels", "masks"] criterion = VideoSetCriterion_ov( sem_seg_head.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO, ) train_metadatas = {} test_metadatas = {} for name in cfg.DATASETS.TRAIN: train_metadatas[name] = MetadataCatalog.get(name) for name in cfg.DATASETS.TEST: test_metadatas[name] = MetadataCatalog.get(name) return { "backbone": backbone, "sem_seg_head": sem_seg_head, "criterion": criterion, "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES, "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD, "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD, "train_metadatas": train_metadatas, "test_metadatas": test_metadatas, "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY, "sem_seg_postprocess_before_inference": True, "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, # video "num_frames": cfg.INPUT.SAMPLING_FRAME_NUM, "window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE, "task": cfg.MODEL.MASK_FORMER.TEST.TASK, # fc clip "geometric_ensemble_alpha": cfg.MODEL.FC_CLIP.GEOMETRIC_ENSEMBLE_ALPHA, "geometric_ensemble_beta": cfg.MODEL.FC_CLIP.GEOMETRIC_ENSEMBLE_BETA, "ensemble_on_valid_mask": cfg.MODEL.FC_CLIP.ENSEMBLE_ON_VALID_MASK, # multi datasets "test2train": {x: y for x, y in zip(cfg.DATASETS.TEST, cfg.DATASETS.TEST2TRAIN)}, } @property def device(self): return self.pixel_mean.device def forward(self, batched_inputs): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper`. Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * "image": Tensor, image in (C, H, W) format. * "instances": per-region ground truth * Other information that's included in the original dicts, such as: "height", "width" (int): the output resolution of the model (may be different from input resolution), used in inference. Returns: list[dict]: each dict has the results for one image. The dict contains the following keys: * "sem_seg": A Tensor that represents the per-pixel segmentation prediced by the head. The prediction has shape KxHxW that represents the logits of each class for each pixel. * "panoptic_seg": A tuple that represent panoptic output panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing". 
""" name = batched_inputs[0]['name'] for batched_input in batched_inputs: assert name == batched_input['name'] # print(batched_inputs) images = [] for video in batched_inputs: for frame in video["image"]: images.append(frame.to(self.device)) images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.size_divisibility) text_classifier, num_templates = self._set_class_information(batched_inputs[0]['name'], self.training) # Append void class weight text_classifier, num_templates = self.get_text_classifier_with_void(text_classifier, num_templates, name=batched_inputs[0]['name']) if not self.training and self.window_inference: outputs = self.run_window_inference(images.tensor, window_size=3, text_classifier=text_classifier, num_templates=num_templates) else: features = self.backbone(images.tensor) features['text_classifier'] = text_classifier features['num_templates'] = num_templates outputs = self.sem_seg_head(features) if self.training: # mask classification target targets = self.prepare_targets(batched_inputs, images) outputs, targets = self.frame_decoder_loss_reshape(outputs, targets) # bipartite matching-based loss losses = self.criterion(outputs, targets) for k in list(losses.keys()): if k in self.criterion.weight_dict: losses[k] *= self.criterion.weight_dict[k] else: # remove this loss if not specified in `weight_dict` losses.pop(k) return losses else: # when inference, bs must be 1 mask_cls_results = outputs["pred_logits"][0] # t q c mask_pred_results = outputs["pred_masks"][0].transpose(0, 1) # t q h w # We ensemble the pred logits of in-vocab and out-vocab if "clip_vis_dense" in outputs.keys(): clip_feature = outputs["clip_vis_dense"] else: clip_feature = features["clip_vis_dense"] mask_for_pooling = F.interpolate(mask_pred_results, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False) if "convnext" in self.backbone.model_name.lower(): pooled_clip_feature = self.mask_pooling(clip_feature, mask_for_pooling) pooled_clip_feature = self.backbone.visual_prediction_forward(pooled_clip_feature) elif "rn" in self.backbone.model_name.lower(): try: pooled_clip_feature = self.backbone.visual_prediction_forward(clip_feature, mask_for_pooling) # (t, q, c) except: pooled_clip_feature = [] _windows_size = 16 iters = len(mask_for_pooling) // _windows_size if len(mask_for_pooling) % _windows_size != 0: iters += 1 for i in range(iters): start_idx = i * _windows_size end_idx = (i + 1) * _windows_size pooled_clip_feature.append(self.backbone.visual_prediction_forward( clip_feature[start_idx:end_idx].to(self.device), mask_for_pooling[start_idx:end_idx].to(self.device))) pooled_clip_feature = torch.cat(pooled_clip_feature, dim=0) else: raise NotImplementedError out_vocab_cls_results = get_classification_logits(pooled_clip_feature, text_classifier, self.backbone.clip_model.logit_scale, num_templates) in_vocab_cls_results = mask_cls_results[..., :-1] # remove void out_vocab_cls_results = out_vocab_cls_results[..., :-1] # remove void # Reference: https://github.com/NVlabs/ODISE/blob/main/odise/modeling/meta_arch/odise.py#L1506 out_vocab_cls_probs = out_vocab_cls_results.softmax(-1) in_vocab_cls_results = in_vocab_cls_results.softmax(-1) category_overlapping_mask = self.category_overlapping_mask.to(self.device) if self.ensemble_on_valid_mask: # Only include out_vocab cls results on masks with valid pixels # We empirically find that this is important to obtain reasonable AP/mIOU score with ResNet CLIP models valid_masking = (mask_for_pooling > 
0).to(mask_for_pooling).sum(-1).sum(-1) > 0 valid_masking = valid_masking.to(in_vocab_cls_results.dtype).unsqueeze(-1) alpha = torch.ones_like(in_vocab_cls_results) * self.geometric_ensemble_alpha beta = torch.ones_like(in_vocab_cls_results) * self.geometric_ensemble_beta alpha = alpha * valid_masking beta = beta * valid_masking else: alpha = self.geometric_ensemble_alpha beta = self.geometric_ensemble_beta cls_logits_seen = ( (in_vocab_cls_results ** (1 - alpha) * out_vocab_cls_probs ** alpha).log() * category_overlapping_mask ) cls_logits_unseen = ( (in_vocab_cls_results ** (1 - beta) * out_vocab_cls_probs ** beta).log() * (1 - category_overlapping_mask) ) cls_results = cls_logits_seen + cls_logits_unseen # This is used to filtering void predictions. is_void_prob = F.softmax(mask_cls_results, dim=-1)[..., -1:] mask_cls_probs = torch.cat([ cls_results.softmax(-1) * (1.0 - is_void_prob), is_void_prob], dim=-1) mask_cls_results = torch.log(mask_cls_probs + 1e-8) outputs["pred_logits"][0] = mask_cls_results # t q c # for minvis outputs = self.post_processing(outputs) mask_cls_results = outputs["pred_logits"] mask_pred_results = outputs["pred_masks"] mask_cls_result = mask_cls_results[0] mask_pred_result = mask_pred_results[0] first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1]) input_per_image = batched_inputs[0] image_size = images.image_sizes[0] # image size without padding after data augmentation height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation width = input_per_image.get("width", image_size[1]) return retry_if_cuda_oom(self.inference_video)( mask_cls_result, mask_pred_result, image_size, height, width, first_resize_size) def frame_decoder_loss_reshape(self, outputs, targets): outputs['pred_masks'] = einops.rearrange(outputs['pred_masks'], 'b q t h w -> (b t) q () h w') outputs['pred_logits'] = einops.rearrange(outputs['pred_logits'], 'b t q c -> (b t) q c') if 'aux_outputs' in outputs: for i in range(len(outputs['aux_outputs'])): outputs['aux_outputs'][i]['pred_masks'] = einops.rearrange( outputs['aux_outputs'][i]['pred_masks'], 'b q t h w -> (b t) q () h w' ) outputs['aux_outputs'][i]['pred_logits'] = einops.rearrange( outputs['aux_outputs'][i]['pred_logits'], 'b t q c -> (b t) q c' ) gt_instances = [] for targets_per_video in targets: num_labeled_frames = targets_per_video['ids'].shape[1] for f in range(num_labeled_frames): labels = targets_per_video['labels'] ids = targets_per_video['ids'][:, [f]] masks = targets_per_video['masks'][:, [f], :, :] gt_instances.append({"labels": labels, "ids": ids, "masks": masks}) return outputs, gt_instances def match_from_embds(self, tgt_embds, cur_embds): cur_embds = cur_embds / cur_embds.norm(dim=1)[:, None] tgt_embds = tgt_embds / tgt_embds.norm(dim=1)[:, None] cos_sim = torch.mm(cur_embds, tgt_embds.transpose(0, 1)) cost_embd = 1 - cos_sim C = 1.0 * cost_embd C = C.cpu() indices = linear_sum_assignment(C.transpose(0, 1)) # target x current indices = indices[1] # permutation that makes current aligns to target return indices def post_processing(self, outputs): pred_logits, pred_masks, pred_embds = outputs['pred_logits'], outputs['pred_masks'], outputs['pred_embds'] pred_logits = pred_logits[0] pred_masks = einops.rearrange(pred_masks[0], 'q t h w -> t q h w') pred_embds = einops.rearrange(pred_embds[0], 'c t q -> t q c') pred_logits = list(torch.unbind(pred_logits)) pred_masks = list(torch.unbind(pred_masks)) pred_embds = list(torch.unbind(pred_embds)) out_logits = [] out_masks = [] 
out_embds = [] out_logits.append(pred_logits[0]) out_masks.append(pred_masks[0]) out_embds.append(pred_embds[0]) # match the instances frame by frame for i in range(1, len(pred_logits)): indices = self.match_from_embds(out_embds[-1], pred_embds[i]) out_logits.append(pred_logits[i][indices, :]) out_masks.append(pred_masks[i][indices, :, :]) out_embds.append(pred_embds[i][indices, :]) out_logits = sum(out_logits)/len(out_logits) out_masks = torch.stack(out_masks, dim=1) # q h w -> q t h w out_logits = out_logits.unsqueeze(0) out_masks = out_masks.unsqueeze(0) outputs['pred_logits'] = out_logits outputs['pred_masks'] = out_masks return outputs def run_window_inference(self, images_tensor, window_size=30, text_classifier=None, num_templates=None): iters = len(images_tensor) // window_size if len(images_tensor) % window_size != 0: iters += 1 out_list = [] for i in range(iters): start_idx = i * window_size end_idx = (i+1) * window_size features = self.backbone(images_tensor[start_idx:end_idx]) features['text_classifier'] = text_classifier features['num_templates'] = num_templates out = self.sem_seg_head(features) del features['res2'], features['res3'], features['res4'], features['res5'] for j in range(len(out['aux_outputs'])): del out['aux_outputs'][j]['pred_masks'], out['aux_outputs'][j]['pred_logits'] # out['pred_masks'] = out['pred_masks'].detach().cpu().to(torch.float32) out['pred_masks'] = out['pred_masks'].detach() out['clip_vis_dense'] = features['clip_vis_dense'] out_list.append(out) # merge outputs outputs = {} outputs['pred_logits'] = torch.cat([x['pred_logits'] for x in out_list], dim=1).detach() outputs['pred_masks'] = torch.cat([x['pred_masks'] for x in out_list], dim=2).detach() outputs['pred_embds'] = torch.cat([x['pred_embds'] for x in out_list], dim=2).detach() outputs['clip_vis_dense'] = torch.cat([x['clip_vis_dense'] for x in out_list], dim=0).detach() return outputs def prepare_targets(self, targets, images): h_pad, w_pad = images.tensor.shape[-2:] gt_instances = [] for targets_per_video in targets: _num_instance = len(targets_per_video["instances"][0]) mask_shape = [_num_instance, self.num_frames, h_pad, w_pad] gt_masks_per_video = torch.zeros(mask_shape, dtype=torch.bool, device=self.device) gt_ids_per_video = [] gt_classes_per_video = [] for f_i, targets_per_frame in enumerate(targets_per_video["instances"]): targets_per_frame = targets_per_frame.to(self.device) h, w = targets_per_frame.image_size gt_ids_per_video.append(targets_per_frame.gt_ids[:, None]) gt_classes_per_video.append(targets_per_frame.gt_classes[:, None]) if isinstance(targets_per_frame.gt_masks, BitMasks): gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks.tensor else: # polygon gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks gt_ids_per_video = torch.cat(gt_ids_per_video, dim=1) gt_classes_per_video = torch.cat(gt_classes_per_video, dim=1).max(dim=1)[0] valid_idx = (gt_ids_per_video != -1).any(dim=-1) gt_classes_per_video = gt_classes_per_video[valid_idx] # N, gt_ids_per_video = gt_ids_per_video[valid_idx] # N, num_frames gt_instances.append({"labels": gt_classes_per_video, "ids": gt_ids_per_video}) gt_masks_per_video = gt_masks_per_video[valid_idx].float() # N, num_frames, H, W gt_instances[-1].update({"masks": gt_masks_per_video}) return gt_instances def inference_video_vis( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, ): if len(pred_cls) > 0: scores = F.softmax(pred_cls, dim=-1)[:, :-1] labels = torch.arange( # 
self.sem_seg_head.num_classes, device=self.device pred_cls.shape[-1] - 1, device=self.device ).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) # keep top-K predictions scores_per_image, topk_indices = scores.flatten(0, 1).topk(10, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // (pred_cls.shape[-1] - 1) pred_masks = pred_masks[topk_indices] # interpolation to original image size pred_masks = F.interpolate( pred_masks, size=first_resize_size, mode="bilinear", align_corners=False ) pred_masks = pred_masks[:, :, : img_size[0], : img_size[1]] pred_masks = F.interpolate( pred_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) masks = pred_masks > 0. del pred_masks out_scores = scores_per_image.tolist() out_labels = labels_per_image.tolist() out_masks = [m for m in masks.cpu()] else: out_scores = [] out_labels = [] out_masks = [] video_output = { "image_size": (output_height, output_width), "pred_scores": out_scores, "pred_labels": out_labels, "pred_masks": out_masks, "task": "vis", } return video_output def inference_video_vps( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, ): pred_cls = F.softmax(pred_cls, dim=-1) mask_pred = pred_masks scores, labels = pred_cls.max(-1) # filter out the background prediction keep = labels.ne(pred_cls.shape[-1] - 1) & (scores > self.object_mask_threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_masks = mask_pred[keep] # interpolation to original image size cur_masks = F.interpolate( cur_masks, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks = cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) cur_prob_masks = cur_scores.view(-1, 1, 1, 1).to(cur_masks.device) * cur_masks # initial panoptic_seg and segments infos h, w = cur_masks.shape[-2:] panoptic_seg = torch.zeros((cur_masks.size(1), h, w), dtype=torch.int32, device=cur_masks.device) segments_infos = [] current_segment_id = 0 if cur_masks.shape[0] == 0: # We didn't detect any mask return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "task": "vps", } else: # take argmax cur_mask_ids = cur_prob_masks.argmax(0) # (t, h, w) stuff_memory_list = {} for k in range(cur_classes.shape[0]): pred_class = cur_classes[k].item() isthing = pred_class < len(self.test_metadata[self.name].thing_dataset_id_to_contiguous_id) # filter out the unstable segmentation results mask_area = (cur_mask_ids == k).sum().item() original_area = (cur_masks[k] >= 0.5).sum().item() mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: if mask_area / original_area < self.overlap_threshold: continue # merge stuff regions if not isthing: if int(pred_class) in stuff_memory_list.keys(): panoptic_seg[mask] = stuff_memory_list[int(pred_class)] continue else: stuff_memory_list[int(pred_class)] = current_segment_id + 1 current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_infos.append( { "id": current_segment_id, "isthing": bool(isthing), "category_id": int(pred_class), } ) return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "task": "vps", } def inference_video_vss( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, ): mask_cls = 
F.softmax(pred_cls, dim=-1)[..., :-1] mask_pred = pred_masks # interpolation to original image size cur_masks = F.interpolate( mask_pred, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks = cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) semseg = torch.einsum("qc,qthw->cthw", mask_cls, cur_masks) sem_score, sem_mask = semseg.max(0) sem_mask = sem_mask return { "image_size": (output_height, output_width), "pred_masks": sem_mask.cpu(), "task": "vss", } @META_ARCH_REGISTRY.register() class DVIS_online_OV(MinVIS_OV): """ Online version of DVIS, including a segmenter and a referring tracker. """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, train_metadatas: dict, test_metadatas: dict, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video tracker, num_frames, window_inference, max_num, max_iter_num, window_size, task, # fc-clip geometric_ensemble_alpha: float, geometric_ensemble_beta: float, ensemble_on_valid_mask: bool, # multi datasets test2train={}, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image # video tracker: a tracker module, e.g. 
ReferringTracker num_frames: number of frames sampled during training window_inference: if the GPU memory is insufficient to predict the entire video at once, inference needs to be performed clip by clip num_class: the categories number of the dataset max_num: the maximum number of instances retained for a video, only used in VIS max_iter_num: the iter nums window_size: the number of images processed by the segmenter at a time task: VIS, VSS or VPS """ super().__init__( backbone=backbone, sem_seg_head=sem_seg_head, criterion=criterion, num_queries=num_queries, object_mask_threshold=object_mask_threshold, overlap_threshold=overlap_threshold, train_metadatas=train_metadatas, test_metadatas=test_metadatas, size_divisibility=size_divisibility, sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference, pixel_mean=pixel_mean, pixel_std=pixel_std, # video num_frames=num_frames, window_inference=window_inference, # dc clip geometric_ensemble_alpha=geometric_ensemble_alpha, geometric_ensemble_beta=geometric_ensemble_beta, ensemble_on_valid_mask=ensemble_on_valid_mask, # multi datasets test2train=test2train, ) # frozen the void classifier for p in self.void_embedding.parameters(): p.requires_grad_(False) # frozen the segmenter for p in self.backbone.parameters(): p.requires_grad_(False) for p in self.sem_seg_head.parameters(): p.requires_grad_(False) self.tracker = tracker self.max_num = max_num self.iter = 0 self.max_iter_num = max_iter_num self.window_size = window_size self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video_task = inference_dict[self.task] @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
matcher = VideoHungarianMatcher_Consistent(
2
2023-11-14 10:55:11+00:00
16k
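For reference, the post_processing step in the record above tracks instances by matching query embeddings frame to frame (match_from_embds). Below is a minimal, self-contained sketch of that idea, cosine-distance cost plus Hungarian assignment via scipy; the function and variable names are illustrative, not taken from the repo.

import torch
from scipy.optimize import linear_sum_assignment

def match_queries(tgt_embds: torch.Tensor, cur_embds: torch.Tensor):
    # tgt_embds, cur_embds: (num_queries, embed_dim), illustrative shapes
    tgt = tgt_embds / tgt_embds.norm(dim=1, keepdim=True)
    cur = cur_embds / cur_embds.norm(dim=1, keepdim=True)
    cost = 1 - torch.mm(cur, tgt.t())  # cosine distance, (cur_q, tgt_q)
    # Transpose so rows index target queries; the returned column indices
    # permute the current frame's queries to align with the previous frame.
    _, col_ind = linear_sum_assignment(cost.cpu().numpy().T)
    return col_ind

# Usage: indices = match_queries(prev_embds, cur_embds)
#        aligned_logits = cur_logits[indices]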
ej0cl6/TextEE
TextEE/models/Degree/EDtrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, con...
import os, sys, logging, tqdm, pprint import torch import numpy as np import ipdb from collections import namedtuple from transformers import BartTokenizer, AutoTokenizer, get_linear_schedule_with_warmup from torch.utils.data import DataLoader from torch.optim import AdamW from ..trainer import BasicTrainer from .EDmodel import DegreeEDModel from .template_generate import event_template, eve_template_generator from .pattern import patterns, ROLE_PH_MAP from scorer import compute_ED_scores, print_scores
12,307
logger = logging.getLogger(__name__) EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_text', 'batch_piece_idxs', 'batch_token_start_idxs', 'batch_input', 'batch_target', 'batch_info'] EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields)) def ED_collate_fn(batch): return EDBatch( batch_doc_id=[instance["doc_id"] for instance in batch], batch_wnd_id=[instance["wnd_id"] for instance in batch], batch_tokens=[instance["tokens"] for instance in batch], batch_text=[instance["text"] for instance in batch], batch_piece_idxs=[instance["piece_idxs"] for instance in batch], batch_token_start_idxs=[instance["token_start_idxs"] for instance in batch], batch_input=[instance["input"] for instance in batch], batch_target=[instance["target"] for instance in batch], batch_info=[instance["info"] for instance in batch], ) def get_span_idx_tri(pieces, token_start_idxs, span, tokenizer, trigger_span=None): """ This function is how we map the generated prediction back to span prediction. Detailed Explanation: We will first split our prediction and use tokenizer to tokenize our predicted "span" into pieces. Then, we will find whether we can find a continuous span in the original "pieces" can match tokenized "span". If it is an argument/relation extraction task, we will return the one which is closest to the trigger_span. """ words = [] for s in span.split(' '): words.extend(tokenizer.encode(s, add_special_tokens=False)) candidates = [] for i in range(len(pieces)): j = 0 k = 0 while j < len(words) and i+k < len(pieces): if pieces[i+k] == words[j]: j += 1 k += 1 elif tokenizer.decode(words[j]) == "": j += 1 elif tokenizer.decode(pieces[i+k]) == "": k += 1 else: break if j == len(words): candidates.append((i, i+k)) candidates = [(token_start_idxs.index(c1), token_start_idxs.index(c2)) for c1, c2 in candidates if c1 in token_start_idxs and c2 in token_start_idxs] if len(candidates) < 1: return [(-1, -1)] else: if trigger_span is None: return candidates else: return sorted(candidates, key=lambda x: np.abs(trigger_span[0]-x[0])) class DegreeEDTrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None def load_model(self, checkpoint=None): if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.tokenizer = state["tokenizer"] self.type_set = state["type_set"] self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: logger.info(f"Loading model from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('facebook/bart-'): self.tokenizer = BartTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, use_fast=False) special_tokens = ['<Trigger>', '<sep>', '<None>'] logger.info(f"Add tokens {special_tokens}") self.tokenizer.add_tokens(special_tokens) self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.cuda(device=self.config.gpu_device) self.generate_vocab() def generate_vocab(self): event_type_itos = sorted(self.type_set["trigger"]) event_type_stoi = {e: i for i, e in enumerate(event_type_itos)} self.vocab = {"event_type_itos": 
event_type_itos, "event_type_stoi": event_type_stoi, } def process_data_for_training(self, data): assert self.tokenizer, "Please load model and tokneizer before processing data!" logger.info("Processing data...") n_total = 0 new_data = [] for dt in tqdm.tqdm(data, ncols=100): n_total += 1 _triggers = [t[:3] for t in dt["triggers"]]
logger = logging.getLogger(__name__) EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_text', 'batch_piece_idxs', 'batch_token_start_idxs', 'batch_input', 'batch_target', 'batch_info'] EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields)) def ED_collate_fn(batch): return EDBatch( batch_doc_id=[instance["doc_id"] for instance in batch], batch_wnd_id=[instance["wnd_id"] for instance in batch], batch_tokens=[instance["tokens"] for instance in batch], batch_text=[instance["text"] for instance in batch], batch_piece_idxs=[instance["piece_idxs"] for instance in batch], batch_token_start_idxs=[instance["token_start_idxs"] for instance in batch], batch_input=[instance["input"] for instance in batch], batch_target=[instance["target"] for instance in batch], batch_info=[instance["info"] for instance in batch], ) def get_span_idx_tri(pieces, token_start_idxs, span, tokenizer, trigger_span=None): """ This function is how we map the generated prediction back to span prediction. Detailed Explanation: We will first split our prediction and use tokenizer to tokenize our predicted "span" into pieces. Then, we will find whether we can find a continuous span in the original "pieces" can match tokenized "span". If it is an argument/relation extraction task, we will return the one which is closest to the trigger_span. """ words = [] for s in span.split(' '): words.extend(tokenizer.encode(s, add_special_tokens=False)) candidates = [] for i in range(len(pieces)): j = 0 k = 0 while j < len(words) and i+k < len(pieces): if pieces[i+k] == words[j]: j += 1 k += 1 elif tokenizer.decode(words[j]) == "": j += 1 elif tokenizer.decode(pieces[i+k]) == "": k += 1 else: break if j == len(words): candidates.append((i, i+k)) candidates = [(token_start_idxs.index(c1), token_start_idxs.index(c2)) for c1, c2 in candidates if c1 in token_start_idxs and c2 in token_start_idxs] if len(candidates) < 1: return [(-1, -1)] else: if trigger_span is None: return candidates else: return sorted(candidates, key=lambda x: np.abs(trigger_span[0]-x[0])) class DegreeEDTrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None def load_model(self, checkpoint=None): if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.tokenizer = state["tokenizer"] self.type_set = state["type_set"] self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: logger.info(f"Loading model from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('facebook/bart-'): self.tokenizer = BartTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, use_fast=False) special_tokens = ['<Trigger>', '<sep>', '<None>'] logger.info(f"Add tokens {special_tokens}") self.tokenizer.add_tokens(special_tokens) self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.cuda(device=self.config.gpu_device) self.generate_vocab() def generate_vocab(self): event_type_itos = sorted(self.type_set["trigger"]) event_type_stoi = {e: i for i, e in enumerate(event_type_itos)} self.vocab = {"event_type_itos": 
event_type_itos, "event_type_stoi": event_type_stoi, } def process_data_for_training(self, data): assert self.tokenizer, "Please load model and tokneizer before processing data!" logger.info("Processing data...") n_total = 0 new_data = [] for dt in tqdm.tqdm(data, ncols=100): n_total += 1 _triggers = [t[:3] for t in dt["triggers"]]
event_template = eve_template_generator(self.config.dataset, dt["tokens"], _triggers, [], self.config.input_style, self.config.output_style, self.vocab, True)
2
2023-11-15 21:32:56+00:00
16k
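For reference, get_span_idx_tri in the record above maps a generated string back to token spans by tokenizing the prediction and scanning the sentence's piece ids for a contiguous match. A simplified sketch of the core subsequence search, without the original's special handling of pieces that decode to empty strings; all ids below are made up.

def find_piece_spans(pieces, span_piece_ids):
    # pieces: token-piece ids of the sentence; span_piece_ids: ids of the
    # tokenized predicted span. Returns candidate (start, end) offsets.
    candidates = []
    n = len(span_piece_ids)
    for i in range(len(pieces) - n + 1):
        if pieces[i:i + n] == span_piece_ids:
            candidates.append((i, i + n))
    return candidates or [(-1, -1)]

# find_piece_spans([5, 8, 13, 8, 21], [13, 8])  ->  [(2, 4)]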
ahayler/s4c
scripts/videos/gen_vid_nvs.py
[ { "identifier": "BTSNet", "path": "models/bts/model/models_bts.py", "snippet": "class BTSNet(torch.nn.Module):\n def __init__(self, conf):\n super().__init__()\n\n self.d_min = conf.get(\"z_near\")\n self.d_max = conf.get(\"z_far\")\n\n self.learn_empty = conf.get(\"learn_...
import numpy as np import sys import copy import hydra import torch from moviepy.video.io.ImageSequenceClip import ImageSequenceClip from tqdm import tqdm from scripts.inference_setup import * from models.bts.model import BTSNet, ImageRaySampler from models.common.render import NeRFRenderer from utils.array_operations import map_fn, unsqueezer from utils.plotting import color_tensor
11,884
sys.path.append(".") def main(): s_img = True s_depth = True dry_run = False indices = [1044] d_min = 3 d_max = 40 task = "KITTI-360" assert task in ["KITTI-360", "KITTI-Raw", "RealEstate10K"] cam_traj = "simple_movement.npy" if task == "KITTI-360": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kitti360("videos/nvs", "2013_05_28_drive_0000_sync", "val_seq") elif task == "KITTI-Raw": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kittiraw("videos/nvs", "val") elif task == "RealEstate10K": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_re10k("videos/nvs", "val") else: raise ValueError(f"Invalid task: {task}") # Slightly hacky, but we need to load the config based on the task global config config = {} @hydra.main(version_base=None, config_path="../../configs", config_name=config_path) def main_dummy(cfg): global config config = copy.deepcopy(cfg) main_dummy() print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) traj_folder = Path("scripts/videos/trajectories") print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) net = BTSNet(config["model_conf"])
sys.path.append(".") def main(): s_img = True s_depth = True dry_run = False indices = [1044] d_min = 3 d_max = 40 task = "KITTI-360" assert task in ["KITTI-360", "KITTI-Raw", "RealEstate10K"] cam_traj = "simple_movement.npy" if task == "KITTI-360": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kitti360("videos/nvs", "2013_05_28_drive_0000_sync", "val_seq") elif task == "KITTI-Raw": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kittiraw("videos/nvs", "val") elif task == "RealEstate10K": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_re10k("videos/nvs", "val") else: raise ValueError(f"Invalid task: {task}") # Slightly hacky, but we need to load the config based on the task global config config = {} @hydra.main(version_base=None, config_path="../../configs", config_name=config_path) def main_dummy(cfg): global config config = copy.deepcopy(cfg) main_dummy() print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) traj_folder = Path("scripts/videos/trajectories") print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) net = BTSNet(config["model_conf"])
renderer = NeRFRenderer.from_conf(config["renderer"])
2
2023-11-12 21:53:27+00:00
16k
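The record above captures a Hydra config through a dummy @hydra.main wrapper and a module-level global. If the same thing were done with Hydra's Compose API instead, it might look like the sketch below; the config path and name are placeholders, not the repo's actual files.

from hydra import initialize, compose

def load_config(config_name: str):
    # initialize() sets up Hydra relative to the calling file;
    # compose() returns the resolved DictConfig without wrapping main().
    with initialize(version_base=None, config_path="../../configs"):
        return compose(config_name=config_name)

# cfg = load_config("my_experiment")  # hypothetical config name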
TCLResearchEurope/torch-dag
torch_dag_algorithms/pruning/module_multipliers.py
[ { "identifier": "structured_modules", "path": "torch_dag/structured_modules.py", "snippet": "ACTIVATION_MODULES_T = Union[\n nn.ReLU,\n nn.ReLU6,\n nn.SiLU,\n nn.Softmax,\n nn.Sigmoid,\n nn.Hardswish,\n nn.Hardsigmoid,\n nn.GELU,\n nn.LeakyReLU,\n nn.ELU,\n nn.Tanh,\n ...
import logging import torch from typing import List, Tuple, Dict, Union from torch_dag import structured_modules as smodules from torch_dag.core.dag_module import DagModule from torch_dag.core.dag_module import InputVertex, InnerVertex, Vertex from torch_dag_algorithms.pruning.commons import PASS_THROUGH_CHANNELS_CLASSES from torch_dag_algorithms.pruning.commons import is_source, get_orbits_dict, is_linear_source, is_depthwise_conv from torch_dag_algorithms.pruning.modules import OrbitModule from torch_dag_timm_plugin.module_multipliers import compute_timm_average_num_channels, \ CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES
12,103
# # Copyright © TCL Research Europe. All rights reserved. # logger = logging.getLogger(__name__) PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES def shape_to_float(shape, device, dim=1): return torch.tensor(shape[dim], device=device).to(torch.float32) def compute_elementwise_op_average_channels(average_number_input_channels: List[List[torch.Tensor]], ): average_number_input_channels = [e for e in average_number_input_channels if e is not None] if len(average_number_input_channels) == 0: return None return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))] def compute_average_num_channels( vertex: InnerVertex, average_number_input_channels: List[List[torch.Tensor]], orbits_dict: Dict[str, OrbitModule], forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]] ) -> Union[List[torch.Tensor], None]: device = forward_dict[vertex.dag_module.input_vertices[0]].device if isinstance(vertex.module, PASS_THROUGH_MULTIPLIER_CLASSES): return [average_number_input_channels[0][0]] if is_source(vertex.module): if vertex.orbit is not None: orbit_module = orbits_dict[vertex.orbit] return [orbit_module.compute_average_number_of_output_channels()] else: if is_linear_source(vertex.module): return [shape_to_float(forward_dict[vertex].shape, dim=-1, device=device)] else: return [shape_to_float(forward_dict[vertex].shape, device=device)] elif is_depthwise_conv(vertex.module): return [average_number_input_channels[0][0]] elif isinstance(vertex.module, (smodules.AddModule, smodules.SubModule, smodules.MulModule)): return compute_elementwise_op_average_channels(average_number_input_channels) elif isinstance(vertex.module, smodules.ConcatModule): return [torch.stack([x[0] for x in average_number_input_channels]).sum()] elif isinstance(vertex.module, smodules.ChunkModule): assert vertex.module.dim == 1 channels = average_number_input_channels[0][0] return [channels / vertex.module.chunks for _ in range(vertex.module.chunks)] elif isinstance(vertex.module, smodules.ParameterModule): # the heuristic here is that the channel dim will be the axis with max shape max_shape = max(forward_dict[vertex].shape) return [torch.tensor(max_shape, device=device).to(torch.float32)] elif isinstance(vertex.module, smodules.TfTokenizeModule): return [shape_to_float(forward_dict[vertex].shape, dim=2, device=device)] elif isinstance(vertex.module, smodules.TfDetokenizeModule): return [shape_to_float(forward_dict[vertex].shape, dim=1, device=device)]
# # Copyright © TCL Research Europe. All rights reserved. # logger = logging.getLogger(__name__) PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES def shape_to_float(shape, device, dim=1): return torch.tensor(shape[dim], device=device).to(torch.float32) def compute_elementwise_op_average_channels(average_number_input_channels: List[List[torch.Tensor]], ): average_number_input_channels = [e for e in average_number_input_channels if e is not None] if len(average_number_input_channels) == 0: return None return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))] def compute_average_num_channels( vertex: InnerVertex, average_number_input_channels: List[List[torch.Tensor]], orbits_dict: Dict[str, OrbitModule], forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]] ) -> Union[List[torch.Tensor], None]: device = forward_dict[vertex.dag_module.input_vertices[0]].device if isinstance(vertex.module, PASS_THROUGH_MULTIPLIER_CLASSES): return [average_number_input_channels[0][0]] if is_source(vertex.module): if vertex.orbit is not None: orbit_module = orbits_dict[vertex.orbit] return [orbit_module.compute_average_number_of_output_channels()] else: if is_linear_source(vertex.module): return [shape_to_float(forward_dict[vertex].shape, dim=-1, device=device)] else: return [shape_to_float(forward_dict[vertex].shape, device=device)] elif is_depthwise_conv(vertex.module): return [average_number_input_channels[0][0]] elif isinstance(vertex.module, (smodules.AddModule, smodules.SubModule, smodules.MulModule)): return compute_elementwise_op_average_channels(average_number_input_channels) elif isinstance(vertex.module, smodules.ConcatModule): return [torch.stack([x[0] for x in average_number_input_channels]).sum()] elif isinstance(vertex.module, smodules.ChunkModule): assert vertex.module.dim == 1 channels = average_number_input_channels[0][0] return [channels / vertex.module.chunks for _ in range(vertex.module.chunks)] elif isinstance(vertex.module, smodules.ParameterModule): # the heuristic here is that the channel dim will be the axis with max shape max_shape = max(forward_dict[vertex].shape) return [torch.tensor(max_shape, device=device).to(torch.float32)] elif isinstance(vertex.module, smodules.TfTokenizeModule): return [shape_to_float(forward_dict[vertex].shape, dim=2, device=device)] elif isinstance(vertex.module, smodules.TfDetokenizeModule): return [shape_to_float(forward_dict[vertex].shape, dim=1, device=device)]
elif isinstance(vertex.module, CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES):
12
2023-11-17 15:36:44+00:00
16k
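The elementwise rule in the record above (add/sub/mul vertices) reduces the inputs' average channel counts with a max over the non-None entries. A minimal standalone sketch of that reduction:

import torch

def elementwise_avg_channels(inputs):
    # inputs: one [tensor] per incoming edge; None entries are dropped.
    inputs = [x for x in inputs if x is not None]
    if not inputs:
        return None
    return [torch.max(torch.stack([x[0] for x in inputs]))]

# elementwise_avg_channels([[torch.tensor(16.)], [torch.tensor(24.)]])
# -> [tensor(24.)]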
newcastleuniversity/DISPEL
dispel/processing/epochs.py
[ { "identifier": "Reading", "path": "dispel/data/core.py", "snippet": "class Reading(FlagMixIn):\n \"\"\"A data capture from an experiment.\n\n Attributes\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this read...
from abc import ABC, ABCMeta from dataclasses import dataclass from typing import Any, Iterable, List, Optional, Sequence, Tuple, Union from dispel.data.core import Reading from dispel.data.epochs import Epoch, EpochDefinition from dispel.data.levels import Level, LevelEpoch, LevelEpochMeasureValue from dispel.data.measures import MeasureValue from dispel.data.raw import ( RawDataSet, RawDataSetDefinition, RawDataSetSource, RawDataValueDefinition, ) from dispel.processing.core import ProcessResultType from dispel.processing.data_set import ( DataSetProcessingStepProtocol, MutateDataSetProcessingStepBase, RawDataSetProcessingResult, StorageError, WrapResultGeneratorType, ) from dispel.processing.extract import ExtractStep from dispel.processing.level import LevelProcessingResult from dispel.processing.transform import TransformStepChainMixIn import pandas as pd
12,044
"""Epoch-specific processing steps.""" class LevelEpochDefinitionMixIn: """A mixin-class for processing steps producing epoch measure sets. Parameters ---------- definition An optional epoch definition. If no epoch definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. Attributes ---------- definition The epoch definition. This will be used in :func:`get_definition` by default. You can overwrite the function to implement custom logic. """ definition: Optional[EpochDefinition] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **_kwargs) -> EpochDefinition: """Get the measure definition. Other Parameters ---------------- _kwargs Optional parameters that will be passed along to the creation of epoch definitions. This can be used to implement custom logic in the epoch definition that depends on processing arguments. Returns ------- EpochDefinition The definition of the epoch """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." return self.definition class CreateLevelEpochStep(
"""Epoch-specific processing steps.""" class LevelEpochDefinitionMixIn: """A mixin-class for processing steps producing epoch measure sets. Parameters ---------- definition An optional epoch definition. If no epoch definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. Attributes ---------- definition The epoch definition. This will be used in :func:`get_definition` by default. You can overwrite the function to implement custom logic. """ definition: Optional[EpochDefinition] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **_kwargs) -> EpochDefinition: """Get the measure definition. Other Parameters ---------------- _kwargs Optional parameters that will be passed along to the creation of epoch definitions. This can be used to implement custom logic in the epoch definition that depends on processing arguments. Returns ------- EpochDefinition The definition of the epoch """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." return self.definition class CreateLevelEpochStep(
LevelEpochDefinitionMixIn, TransformStepChainMixIn, MutateDataSetProcessingStepBase
12
2023-11-14 10:06:46+00:00
16k
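The mixin in the record above lets the epoch definition be supplied either as a class attribute or as an init keyword, with the getter asserting that one of the two was provided. A generic sketch of that pattern, with illustrative names:

from typing import Optional

class DefinitionMixIn:
    definition: Optional[str] = None  # class-level default

    def __init__(self, *args, **kwargs):
        # an instance kwarg overrides the class attribute, if given
        self.definition = kwargs.pop("definition", None) or self.definition
        super().__init__(*args, **kwargs)

    def get_definition(self) -> str:
        assert self.definition is not None, "definition must be set"
        return self.definition

class MyStep(DefinitionMixIn):
    definition = "default-epoch"

# MyStep().get_definition()                      -> "default-epoch"
# MyStep(definition="walk-epoch").get_definition() -> "walk-epoch"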
believethehype/nostrdvm
main.py
[ { "identifier": "Bot", "path": "nostr_dvm/bot.py", "snippet": "class Bot:\n job_list: list\n\n # This is a simple list just to keep track which events we created and manage, so we don't pay for other requests\n def __init__(self, dvm_config, admin_config=None):\n self.NAME = \"Bot\"\n ...
import os import dotenv from pathlib import Path from sys import platform from nostr_dvm.bot import Bot from nostr_dvm.tasks import videogeneration_replicate_svd, imagegeneration_replicate_sdxl, textgeneration_llmlite, \ trending_notes_nostrband, discovery_inactive_follows, translation_google, textextraction_pdf, \ translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle, texttospeech, \ imagegeneration_sd21_mlx, advanced_search, textgeneration_huggingchat, summarization_huggingchat from nostr_dvm.utils.admin_utils import AdminConfig from nostr_dvm.utils.backend_utils import keep_alive from nostr_dvm.utils.definitions import EventDefinitions from nostr_dvm.utils.dvmconfig import DVMConfig from nostr_dvm.utils.external_dvm_utils import build_external_dvm from nostr_dvm.utils.nostr_utils import check_and_set_private_key from nostr_dvm.utils.output_utils import PostProcessFunctionType from nostr_dvm.utils.zap_utils import check_and_set_ln_bits_keys from nostr_sdk import Keys
13,007
def playground(): # We will run an optional bot that can communicate with the DVMs # Note this is very basic for now and still under development bot_config = DVMConfig() bot_config.PRIVATE_KEY = check_and_set_private_key("bot") npub = Keys.from_sk_str(bot_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys("bot", npub) bot_config.LNBITS_INVOICE_KEY = invoice_key bot_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back bot_config.LNBITS_URL = os.getenv("LNBITS_HOST") # Generate an optional Admin Config, in this case, whenever we give our DVMs this config, they will (re)broadcast # their NIP89 announcement # You can create individual admins configs and hand them over when initializing the dvm, # for example to whilelist users or add to their balance. # If you use this global config, options will be set for all dvms that use it. admin_config = AdminConfig() admin_config.REBROADCAST_NIP89 = False admin_config.LUD16 = lnaddress # Set rebroadcast to true once you have set your NIP89 descriptions and d tags. You only need to rebroadcast once you # want to update your NIP89 descriptions # Update the DVMs (not the bot) profile. For example after you updated the NIP89 or the lnaddress, you can automatically update profiles here. admin_config.UPDATE_PROFILE = False # Spawn some DVMs in the playground and run them # You can add arbitrary DVMs there and instantiate them here # Spawn DVM1 Kind 5000: A local Text Extractor from PDFs pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config) # If we don't add it to the bot, the bot will not provide access to the DVM pdfextractor.run() # Spawn DVM2 Kind 5002 Local Text TranslationGoogle, calling the free Google API. translator = translation_google.build_example("Google Translator", "google_translator", admin_config) bot_config.SUPPORTED_DVMS.append(translator) # We add translator to the bot translator.run() # Spawn DVM3 Kind 5002 Local Text TranslationLibre, calling the free LibreTranslateApi, as an alternative. # This will only run and appear on the bot if an endpoint is set in the .env if os.getenv("LIBRE_TRANSLATE_ENDPOINT") is not None and os.getenv("LIBRE_TRANSLATE_ENDPOINT") != "": libre_translator = translation_libretranslate.build_example("Libre Translator", "libre_translator", admin_config) bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot libre_translator.run() # Spawn DVM4, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. if os.getenv("OPENAI_API_KEY") is not None and os.getenv("OPENAI_API_KEY") != "": dalle = imagegeneration_openai_dalle.build_example("Dall-E 3", "dalle3", admin_config) bot_config.SUPPORTED_DVMS.append(dalle) dalle.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "":
def playground(): # We will run an optional bot that can communicate with the DVMs # Note this is very basic for now and still under development bot_config = DVMConfig() bot_config.PRIVATE_KEY = check_and_set_private_key("bot") npub = Keys.from_sk_str(bot_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys("bot", npub) bot_config.LNBITS_INVOICE_KEY = invoice_key bot_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back bot_config.LNBITS_URL = os.getenv("LNBITS_HOST") # Generate an optional Admin Config, in this case, whenever we give our DVMs this config, they will (re)broadcast # their NIP89 announcement # You can create individual admins configs and hand them over when initializing the dvm, # for example to whilelist users or add to their balance. # If you use this global config, options will be set for all dvms that use it. admin_config = AdminConfig() admin_config.REBROADCAST_NIP89 = False admin_config.LUD16 = lnaddress # Set rebroadcast to true once you have set your NIP89 descriptions and d tags. You only need to rebroadcast once you # want to update your NIP89 descriptions # Update the DVMs (not the bot) profile. For example after you updated the NIP89 or the lnaddress, you can automatically update profiles here. admin_config.UPDATE_PROFILE = False # Spawn some DVMs in the playground and run them # You can add arbitrary DVMs there and instantiate them here # Spawn DVM1 Kind 5000: A local Text Extractor from PDFs pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config) # If we don't add it to the bot, the bot will not provide access to the DVM pdfextractor.run() # Spawn DVM2 Kind 5002 Local Text TranslationGoogle, calling the free Google API. translator = translation_google.build_example("Google Translator", "google_translator", admin_config) bot_config.SUPPORTED_DVMS.append(translator) # We add translator to the bot translator.run() # Spawn DVM3 Kind 5002 Local Text TranslationLibre, calling the free LibreTranslateApi, as an alternative. # This will only run and appear on the bot if an endpoint is set in the .env if os.getenv("LIBRE_TRANSLATE_ENDPOINT") is not None and os.getenv("LIBRE_TRANSLATE_ENDPOINT") != "": libre_translator = translation_libretranslate.build_example("Libre Translator", "libre_translator", admin_config) bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot libre_translator.run() # Spawn DVM4, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. if os.getenv("OPENAI_API_KEY") is not None and os.getenv("OPENAI_API_KEY") != "": dalle = imagegeneration_openai_dalle.build_example("Dall-E 3", "dalle3", admin_config) bot_config.SUPPORTED_DVMS.append(dalle) dalle.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "":
sdxlreplicate = imagegeneration_replicate_sdxl.build_example("Stable Diffusion XL", "replicate_sdxl", admin_config)
2
2023-11-17 18:32:56+00:00
16k
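The playground above repeatedly gates DVM spawning on the presence of a non-empty environment variable. A tiny helper sketch of that check; the builder name in the usage comment is hypothetical.

import os

def env_set(name: str) -> bool:
    value = os.getenv(name)
    return value is not None and value != ""

# if env_set("OPENAI_API_KEY"):
#     dvm = build_openai_dvm(...)  # hypothetical builder
#     dvm.run()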
IBM/oper8
oper8/watch_manager/python_watch_manager/filters/filters.py
[ { "identifier": "KubeEventType", "path": "oper8/deploy_manager/kube_event.py", "snippet": "class KubeEventType(Enum):\n \"\"\"Enum for all possible kubernetes event types\"\"\"\n\n DELETED = \"DELETED\"\n MODIFIED = \"MODIFIED\"\n ADDED = \"ADDED\"" }, { "identifier": "ManagedObject"...
from abc import ABC, abstractmethod from collections import deque from typing import Optional from ....deploy_manager import KubeEventType from ....managed_object import ManagedObject from ....reconcile import ReconcileManager from ....status import READY_CONDITION, get_condition from ....utils import abstractclassproperty from ..utils import ( RESERVED_PLATFORM_ANNOTATIONS, RESOURCE_VERSION_KEEP_COUNT, obj_to_hash, ) import alog
11,921
"""Filter for duplicate resource versions which happens when restarting a watch connection""" def __init__(self, resource: ManagedObject): """Initialize the resource version list""" # Use a dequeue instead of a list/set to set a bound on the number # of tracked versions self.resource_versions = deque([], maxlen=RESOURCE_VERSION_KEEP_COUNT) super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if the resource's resourceVersion has been seen before""" # Don't skip add events as the kubernetes watch can duplicate events if event == KubeEventType.DELETED: return return resource.resource_version not in self.resource_versions def update(self, resource: ManagedObject): """Add the resources ResourceVersion to the list""" self.resource_versions.append(resource.resource_version) ### Annotation Filters class AnnotationFilter(Filter): """Filter resources to reconcile on annotation changes""" def __init__(self, resource: ManagedObject): """Initialize the annotation hash variable""" self.annotations = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resource's annotation has changed""" # Ignore Added and deleted events if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.annotations != self.get_annotation_hash(resource) def update(self, resource: ManagedObject): """Update the currently stored annotation""" self.annotations = self.get_annotation_hash(resource) def get_annotation_hash(self, resource: ManagedObject) -> str: """Helper function to get the annotation hash""" return obj_to_hash(resource.metadata.get("annotations", {})) class UserAnnotationFilter(AnnotationFilter): """Filter resources to reconcile on user annotation changes. This excludes kubernetes and openshift annotations """ def get_annotation_hash(self, resource: ManagedObject) -> str: """Overriden function to exclude common platform annotations from the annotation hash""" output_annotations = {} for key, value in resource.metadata.get("annotations", {}).items(): if self.contains_platform_key(key): continue output_annotations[key] = value return obj_to_hash(output_annotations) def contains_platform_key(self, key: str) -> bool: """Helper to check if the key contains one of the platform annotations""" return any( reserved_key in key for reserved_key in RESERVED_PLATFORM_ANNOTATIONS ) ### Oper8 Filters class PauseFilter(Filter): """This filter skips resources that have the oper8 pause annotation""" def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test if a resource has the pause annotation""" return not ReconcileManager._is_paused( # pylint: disable=protected-access resource ) class SubsystemStatusFilter(Filter): """Reconcile oper8 controllers when their oper8 status changes EXPERIMENTAL: This has passed basic validation but has not been rigorously tested in the field """ def __init__(self, resource: ManagedObject): """Initialize the currently observed ready condition""" self.ready_condition = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resources subsystem condition has changed""" if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.ready_condition != get_condition(
""" Filters are used to limit the amount of events being reconciled by a watch manager This is based off of the kubernetes controller runtime's "predicates": https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/predicate#Funcs The default set of filters is derived from operator-sdk's ansible predicates https://github.com/operator-framework/operator-sdk/blob/50c6ac03746ff4edf582feb9a71d2a7ea6ae6c40/internal/ansible/controller/controller.go#L105 """ # Standard # First Party # Local log = alog.use_channel("PWMFLT") ## Default Types class Filter(ABC): """Generic Filter Interface for subclassing. Every subclass should implement a `test` function which returns true when a resource should be reconciled. Subclasses can optionally implement a `update` method if the filter requires storing some stateful information like ResourceVersion or Metadata. NOTE: A unique Filter instance is created for each resource """ def __init__(self, resource: ManagedObject): # noqa: B027 """Initializer can be used to detect configuration or create instance variables. Even though a resource is provided it should not set state until update is called Args: resource: ManagedObject This resource can be used by subclass to gather generic information. """ ## Abstract Interface ###################################################### # # These functions must be implemented by child classes ## @abstractmethod def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test whether the resource&event passes the filter. Returns true if the filter should be reconciled and return false if it should not be. A filter can optionally return None to ignore an event Args: resource: ManagedObject The current resource being checked event: KubeEventType The event type that triggered this filter Returns: result: Optional[bool] The result of the test. """ ## Base Class Interface #################################################### # # These methods MAY be implemented by children, but contain default # implementations that are appropriate for simple cases. # ## def update(self, resource: ManagedObject): # noqa: B027 """Update the instances current state. 
Args: resource: ManagedObject The current state of the resource """ def update_and_test(self, resource: ManagedObject, event: KubeEventType) -> bool: """First test a resource/event against a filter then update the current state Args: resource: ManagedObject The resource being filtered event: KubeEventType The event to be filtered Returns: test_result: bool The test result """ result = self.test(resource, event) if result is not None and not result: log.debug3( "Failed filter: %s with return val %s", self, result, extra={"resource": resource}, ) self.update(resource) return result ## Generic Resource filters class CreationDeletionFilter(Filter): """Filter to ensure reconciliation on creation and deletion events""" def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if event is ADDED or DELETED""" # Ignore non Added/Deleted Events if event not in [KubeEventType.ADDED, KubeEventType.DELETED]: return return True class GenerationFilter(Filter): """Filter for reconciling on generation changes for resources that support it""" def __init__(self, resource: ManagedObject): """Set generation instance variable""" super().__init__(resource) self.generation = None def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if resource generation is different than before""" # Only update&test resources with a generation if not self.generation: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Test if new generation is different return self.generation != resource.metadata.get("generation") def update(self, resource: ManagedObject): """Update the currently observed generation""" self.generation = resource.metadata.get("generation") class NoGenerationFilter(Filter): """Filter for reconciling changes to spec on resources that don't support the generation field like pods. 
It does this by hashing the objects excluding status and metadata""" def __init__(self, resource: ManagedObject): """Check if resource supports generation and initialize the hash dict""" self.supports_generation = resource.metadata.get("generation") is not None self.resource_hashes = {} super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return True if a resources current hash differs from the current""" # Don't test resources that support generation or if we don't have hashes yet if self.supports_generation or not self.resource_hashes: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Check each stored resource hash to see if its # changed for key, obj_has in self.resource_hashes.items(): if obj_has != obj_to_hash(resource.get(key)): log.debug2("Detected change in %s", key) return True return False def update(self, resource: ManagedObject): """Update the observed spec hashes""" if self.supports_generation: return # Get the default hashes for all object keys except metadata # and status for key, obj in resource.definition.items(): if key in ["metadata", "status", "kind", "apiVersion"]: continue self.resource_hashes[key] = obj_to_hash(obj) class ResourceVersionFilter(Filter): """Filter for duplicate resource versions which happens when restarting a watch connection""" def __init__(self, resource: ManagedObject): """Initialize the resource version list""" # Use a dequeue instead of a list/set to set a bound on the number # of tracked versions self.resource_versions = deque([], maxlen=RESOURCE_VERSION_KEEP_COUNT) super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if the resource's resourceVersion has been seen before""" # Don't skip add events as the kubernetes watch can duplicate events if event == KubeEventType.DELETED: return return resource.resource_version not in self.resource_versions def update(self, resource: ManagedObject): """Add the resources ResourceVersion to the list""" self.resource_versions.append(resource.resource_version) ### Annotation Filters class AnnotationFilter(Filter): """Filter resources to reconcile on annotation changes""" def __init__(self, resource: ManagedObject): """Initialize the annotation hash variable""" self.annotations = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resource's annotation has changed""" # Ignore Added and deleted events if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.annotations != self.get_annotation_hash(resource) def update(self, resource: ManagedObject): """Update the currently stored annotation""" self.annotations = self.get_annotation_hash(resource) def get_annotation_hash(self, resource: ManagedObject) -> str: """Helper function to get the annotation hash""" return obj_to_hash(resource.metadata.get("annotations", {})) class UserAnnotationFilter(AnnotationFilter): """Filter resources to reconcile on user annotation changes. 
This excludes kubernetes and openshift annotations """ def get_annotation_hash(self, resource: ManagedObject) -> str: """Overriden function to exclude common platform annotations from the annotation hash""" output_annotations = {} for key, value in resource.metadata.get("annotations", {}).items(): if self.contains_platform_key(key): continue output_annotations[key] = value return obj_to_hash(output_annotations) def contains_platform_key(self, key: str) -> bool: """Helper to check if the key contains one of the platform annotations""" return any( reserved_key in key for reserved_key in RESERVED_PLATFORM_ANNOTATIONS ) ### Oper8 Filters class PauseFilter(Filter): """This filter skips resources that have the oper8 pause annotation""" def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test if a resource has the pause annotation""" return not ReconcileManager._is_paused( # pylint: disable=protected-access resource ) class SubsystemStatusFilter(Filter): """Reconcile oper8 controllers when their oper8 status changes EXPERIMENTAL: This has passed basic validation but has not been rigorously tested in the field """ def __init__(self, resource: ManagedObject): """Initialize the currently observed ready condition""" self.ready_condition = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resources subsystem condition has changed""" if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.ready_condition != get_condition(
READY_CONDITION, resource.get("status", {})
3
2023-11-15 16:43:29+00:00
16k
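The filters in the record above all follow the same test/update protocol: test may return True, False, or None (abstain), and update records the newly observed state after testing. A minimal standalone sketch of one such predicate, tracking metadata.generation with plain dicts and strings instead of the repo's types:

from typing import Optional

class GenerationChanged:
    def __init__(self):
        self.generation = None

    def test(self, resource: dict, event: str) -> Optional[bool]:
        if event in ("ADDED", "DELETED"):
            return None  # abstain; let other filters decide
        return self.generation != resource["metadata"].get("generation")

    def update(self, resource: dict):
        self.generation = resource["metadata"].get("generation")

# f = GenerationChanged()
# f.update({"metadata": {"generation": 1}})
# f.test({"metadata": {"generation": 2}}, "MODIFIED")  -> True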
Jisencc/yolov5_dual_weighting
segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import os import platform import sys import torch from pathlib import Path from ultralytics.utils.plotting import Annotator, colors, save_one_box from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer) from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode
11,286
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
@smart_inference_mode()
11
2023-11-12 13:28:26+00:00
16k
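The predict.py record above depends on a small repo-root bootstrap before its `from models.common import ...` style imports can resolve. Here is that pattern in isolation, a sketch using only the stdlib; the parents[1] depth assumes a script living one directory below the repo root, as segment/predict.py does.

import os
import sys
from pathlib import Path

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]                          # repo root: one level above segment/
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))                  # makes repo-relative imports resolvable
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative form keeps logged paths short
print(ROOT)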
RAIVNLab/MatFormer-OLMo
olmo/train.py
[ { "identifier": "PathOrStr", "path": "olmo/aliases.py", "snippet": "" }, { "identifier": "CheckpointType", "path": "olmo/config.py", "snippet": "class CheckpointType(StrEnum):\n sharded = \"sharded\"\n unsharded = \"unsharded\"" }, { "identifier": "SpeedMonitorConfig", ...
import logging import math import random import shutil import time import numpy as np import torch import torch.nn.functional as F import wandb from collections import deque from dataclasses import dataclass, field from itertools import islice from pathlib import Path from typing import Any, Deque, Dict, List, Optional, TextIO, Tuple from packaging import version from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.distributed.fsdp.api import ( FullOptimStateDictConfig, ShardedOptimStateDictConfig, ShardedStateDictConfig, ) from torch.utils.data import DataLoader from torchmetrics import MeanMetric from .aliases import PathOrStr from .config import CheckpointType, SpeedMonitorConfig, TrainConfig from .data import IterableDataset from .eval import Evaluator from .exceptions import OlmoConfigurationError from .model import Olmo, MatformerManager from .optim import set_new_base_lr from .util import ( barrier, get_global_rank, get_world_size, move_to_device, peak_gpu_memory, resource_path, syncronize_flag, upload, wait_on, )
13,307
else: return False def eval(self) -> Dict[str, Any]: # Zero gradients and set model to 'eval' mode. self.optim.zero_grad(set_to_none=True) self.fsdp_model.eval() eval_metrics = {} for evaluator in self.evaluators: log.info(f"Running evaluation for '{evaluator.label}'...") # Run model over batches. if self.cfg.matformer_factor > 1: self.matmng = MatformerManager.get_instance() self.matmng.current_factor = 1 assert self.cfg.matformer_factor % 2 == 0 iters = int(math.log2(self.cfg.matformer_factor)) + 1 for i in range(iters): # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label} 1/{self.matmng.current_factor}", metrics) self.matmng.current_factor *= 2 for m in metrics: eval_metrics.pop(m, None) else: # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label}", metrics) del eval_batches return eval_metrics def fit(self): start_time = time.time() if self.cfg.load_path is not None and self.global_step > 0 and self.cfg.eval_on_load: eval_metrics = self.eval() if wandb.run is not None: wandb.log(eval_metrics, step=self.global_step) # Set model to 'train' mode. self.fsdp_model.train() # Initialize monitors. assert self.cfg.device_train_batch_size is not None speed_monitor = SpeedMonitor(self.cfg.speed_monitor) lr_monitor = LRMonitor(self.optim) # Log system metrics at the start of training. sys_metrics = self.system_metrics() if sys_metrics: self.log_metrics_to_console("Pre-train system metrics", sys_metrics) if wandb.run is not None: wandb.log(sys_metrics, step=0) # Train. first_batch: bool = True for batch in self.train_loader: # Bookkeeping. # NOTE: To track the global batch size / number of tokens per batch we make the assumption that all # batches see the same number of tokens, which should be the case for language model pre-training # (at least when drop_last=True). # Alternatively we'd have to use a distributed all reduce over seq_len here, but I don't want that overhead. 
# So for now I'm putting these assertions here so if the assumption is violated it will fail loudly. batch_size, seq_len = batch["input_ids"].shape assert seq_len == self.cfg.model.max_sequence_length assert batch_size == self.cfg.device_train_batch_size
from __future__ import annotations __all__ = ["SpeedMonitor", "LRMonitor", "Trainer"] log = logging.getLogger(__name__) @dataclass class SpeedMonitor: cfg: SpeedMonitorConfig start_times: Deque[float] = field(default_factory=lambda: deque([])) global_total_tokens: int = 0 device_interval_tokens: Deque[int] = field(default_factory=lambda: deque([])) def batch_start(self, global_total_tokens: int, device_batch_num_tokens: int, record: bool = True) -> None: self.global_total_tokens = global_total_tokens if record: if len(self.start_times) >= self.cfg.window_size: self.start_times.popleft() self.device_interval_tokens.popleft() self.start_times.append(time.monotonic()) self.device_interval_tokens.append(device_batch_num_tokens) def reset(self) -> None: self.start_times.clear() self.device_interval_tokens.clear() def check(self) -> Dict[str, float]: metrics: Dict[str, float] = {"throughput/total_tokens": self.global_total_tokens} if self.start_times: interval_seconds = time.monotonic() - self.start_times[0] interval_batches = len(self.start_times) interval_tokens = sum(self.device_interval_tokens) metrics["throughput/device/tokens_per_second"] = interval_tokens / interval_seconds metrics["throughput/device/batches_per_second"] = interval_batches / interval_seconds return metrics @dataclass class LRMonitor: optim: torch.optim.Optimizer def check(self) -> Dict[str, float]: lrs = [group["lr"] for group in self.optim.param_groups] return {f"optim/learning_rate_group{idx}": lr for idx, lr in enumerate(lrs)} @dataclass class Trainer: cfg: TrainConfig model: Olmo fsdp_model: FSDP optim: torch.optim.Optimizer scheduler: torch.optim.lr_scheduler.LRScheduler train_loader: DataLoader device: torch.device evaluators: List[Evaluator] ce_train_loss_metric: MeanMetric z_train_loss_metric: Optional[MeanMetric] = None global_step: int = 0 global_data_step: int = 0 """This is now redundant since adding 'global_train_examples_seen'.""" global_train_examples_seen: int = 0 """Tracks the global number of training examples seen for the purpose of restoring the dataset position on restarts.""" global_train_tokens_seen: int = 0 """Tracks the global total number of tokens trained on.""" checkpoints: List[Path] = field(default_factory=list) unsharded_checkpoints: List[Path] = field(default_factory=list) min_train_loss: float = float("inf") indices_file: Optional[TextIO] = None def state_dict(self) -> Dict[str, Any]: state_dict = self.non_tensor_state_dict() state_dict["model"] = self.fsdp_model.state_dict() state_dict["optim"] = FSDP.optim_state_dict(self.fsdp_model, self.optim) return state_dict def non_tensor_state_dict(self) -> Dict[str, Any]: return { "scheduler": self.scheduler.state_dict(), "global_step": self.global_step, "global_data_step": self.global_data_step, "global_train_examples_seen": self.global_train_examples_seen, "global_train_tokens_seen": self.global_train_tokens_seen, "checkpoints": self.checkpoints, "unsharded_checkpoints": self.unsharded_checkpoints, "rng": { "python": random.getstate(), "numpy": np.random.get_state(), "torch": torch.random.get_rng_state(), "cuda": torch.cuda.get_rng_state(), }, } def load_non_tensor_state_dict(self, state_dict: Dict[str, Any]) -> None: # Checkpoint paths. 
self.checkpoints = [ path for path in state_dict["checkpoints"] if path.is_dir() and path.resolve().parent == Path(self.cfg.save_folder).resolve() ] self.unsharded_checkpoints = [ path for path in state_dict["unsharded_checkpoints"] if path.is_dir() and path.resolve().parent == Path(self.cfg.save_folder).resolve() ] # Learning rate scheduler. self.scheduler.load_state_dict(state_dict["scheduler"]) # Dataset / dataloader position. self.global_step = state_dict["global_step"] self.global_data_step = state_dict["global_data_step"] self.global_train_examples_seen = state_dict.get( # newer addition "global_train_examples_seen", self.global_data_step * self.cfg.global_train_batch_size ) self.global_train_tokens_seen = state_dict.get( # newer addition "global_train_tokens_seen", self.global_data_step * self.cfg.global_train_batch_size * self.cfg.model.max_sequence_length, ) if not self.cfg.restore_dataloader: self.global_data_step = 0 self.global_train_examples_seen = 0 self.global_train_tokens_seen = 0 elif self.cfg.fast_forward_batches: self.global_data_step += self.cfg.fast_forward_batches # Technically we don't "see" these batches that we fast-forward through, but we use # this variable to update the position of the dataset so we need to include them here. self.global_train_examples_seen += self.cfg.fast_forward_batches * self.cfg.global_train_batch_size # NOTE: on the other hand we don't add anything to 'self.global_train_tokens_seen' here because # that variable is meant to track the actual number of tokens trained on. if self.global_data_step > 0: if self.global_data_step > self.global_step: log.info( f"Fast-forwarding data loader to step {self.global_step:,d}+{self.global_data_step-self.global_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) else: log.info( f"Fast-forwarding data loader to step {self.global_data_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) assert isinstance(self.train_loader.dataset, IterableDataset) self.train_loader.dataset.start_index = self.global_train_examples_seen if not self.cfg.restore_base_learning_rate: # Reset base learning rate to the value in the config, not the checkpoint. set_new_base_lr(self.optim, self.scheduler, self.cfg.optimizer.learning_rate) # RNG states. if "rng" in state_dict: rng_state = state_dict["rng"] self.restore_rng_state(rng_state) def restore_rng_state(self, rng_state: Dict[str, Any]) -> None: random.setstate(rng_state["python"]) np.random.set_state(rng_state["numpy"]) torch.set_rng_state(rng_state["torch"]) torch.cuda.set_rng_state(rng_state["cuda"]) def save_sharded_checkpoint(self) -> Path: checkpoint_dir = Path(self.cfg.save_folder) / f"step{self.global_step}" checkpoint_dir_tmp = Path(self.cfg.save_folder) / f"step{self.global_step}-tmp" try: next(checkpoint_dir.glob("*")) if self.cfg.save_overwrite: if get_global_rank() == 0: shutil.rmtree(checkpoint_dir) else: raise OlmoConfigurationError( f"Checkpoint for step {self.global_step} already exists, use --save-overwrite to overwrite it" ) except StopIteration: pass if get_global_rank() == 0: checkpoint_dir_tmp.mkdir(parents=True, exist_ok=True) self.checkpoints.append(checkpoint_dir) barrier() # Flush data indices file. if self.indices_file is not None: self.indices_file.flush() # Write the checkpoint. 
with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=True), optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True), ): # NOTE: Alternatively we could use the checkpointing method in this test # https://github.com/pytorch/pytorch/blob/main/test/distributed/checkpoint/test_fsdp_optim_state.py # but we've had issues with that on AMD GPUs. See # https://github.com/pytorch/pytorch/issues/100041 # checkpoint.save_state_dict(self.state_dict(), checkpoint.FileSystemWriter(checkpoint_dir)) torch.save(self.state_dict(), checkpoint_dir_tmp / f"rank{get_global_rank()}.pt") # Save config too. if get_global_rank() == 0: self.cfg.save(checkpoint_dir_tmp / "config.yaml") barrier() if get_global_rank() == 0: # Replace temp directory with target checkpoint directory. checkpoint_dir_tmp.replace(checkpoint_dir) # Link to 'latest'. latest_path = Path(self.cfg.save_folder) / "latest" latest_path.unlink(missing_ok=True) latest_path.symlink_to(checkpoint_dir.name, target_is_directory=True) # In the cases where we're using a shared NFS drive between ranks to save checkpoints, # replacing the temp directory with the final directory from rank 0 might not be immediately # realized in the file systems of the other ranks. # So we wait here across all ranks until that final checkpoint directory is visible. wait_on(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0) # Remove old checkpoints. if self.cfg.save_num_checkpoints_to_keep > 0: while len(self.checkpoints) > self.cfg.save_num_checkpoints_to_keep: self.remove_sharded_checkpoint(0) barrier() # Upload checkpoint to bucket. if self.cfg.remote_save_folder is not None: files_to_upload = [f"rank{get_global_rank()}.pt"] if get_global_rank() == 0: files_to_upload.append("config.yaml") for fname in files_to_upload: source = checkpoint_dir / fname target = f"{self.cfg.remote_save_folder}/{checkpoint_dir.name}/{fname}" log.info(f"Uploading {source} to {target}...") upload(source, target, save_overwrite=self.cfg.save_overwrite) barrier() return checkpoint_dir def remove_sharded_checkpoint(self, idx: int = 0): oldest_checkpoint = self.checkpoints.pop(idx) barrier() if get_global_rank() == 0 and oldest_checkpoint.is_dir(): shutil.rmtree(oldest_checkpoint, ignore_errors=True) latest_path = Path(self.cfg.save_folder) / "latest" if latest_path.resolve() == oldest_checkpoint.resolve(): latest_path.unlink() barrier() def restore_sharded_checkpoint(self, load_path: PathOrStr): # Zero-gradients to avoid gathering them. self.optim.zero_grad(set_to_none=True) with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=True), optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True), ): # NOTE: Alternatively we could use the checkpointing method in this test # https://github.com/pytorch/pytorch/blob/main/test/distributed/checkpoint/test_fsdp_optim_state.py # but we've had issues with that on AMD GPUs. See # https://github.com/pytorch/pytorch/issues/100041 # But basically it would look like this. # Load the serialized state dict in place. # state_dict = self.state_dict() # del state_dict["optim"] # Can't load optimizer together with the model # checkpoint.load_state_dict(state_dict, checkpoint.FileSystemReader(load_path)) # self.fsdp_model.load_state_dict(state_dict["model"]) # Load other state... # Load optim state. 
# optim_state = load_sharded_optimizer_state_dict( # model_state_dict=state_dict["model"], # optimizer_key="optim", # storage_reader=checkpoint.FileSystemReader(load_path), # ) # flattened_osd = FSDP.optim_state_dict_to_load(optim_state["optim"], self.fsdp_model, self.optim) # self.optim.load_state_dict(flattened_osd) # Deserialize state dictionary. state_dict = torch.load(resource_path(load_path, f"rank{get_global_rank()}.pt")) # Load model and optimizer state. log.info("Loading model state...") self.fsdp_model.load_state_dict(state_dict["model"]) log.info("Loading optimizer state...") # NOTE: careful, the order of these arguments has changed since the 2.0 release. if version.parse(torch.__version__) < version.parse("2.1.0"): # flattened_osd = FSDP.optim_state_dict_to_load(optim_state["optim"], self.fsdp_model, self.optim) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(state_dict["optim"], self.fsdp_model, self.optim) # type: ignore else: # flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, optim_state["optim"]) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, state_dict["optim"]) # type: ignore self.optim.load_state_dict(flattened_osd) # Load non-tensor state. self.load_non_tensor_state_dict(state_dict) del state_dict, flattened_osd barrier() def save_unsharded_checkpoint(self) -> Path: # Zero-gradients to avoid gathering them. self.optim.zero_grad(set_to_none=True) checkpoint_dir = Path(self.cfg.save_folder) / f"step{self.global_step}-unsharded" checkpoint_dir_tmp = Path(self.cfg.save_folder) / f"step{self.global_step}-unsharded-tmp" try: next(checkpoint_dir.glob("*")) if self.cfg.save_overwrite: if get_global_rank() == 0: shutil.rmtree(checkpoint_dir) else: raise OlmoConfigurationError( f"Unsharded checkpoint for step {self.global_step} already exists, use --save-overwrite to overwrite it" ) except StopIteration: pass if get_global_rank() == 0: checkpoint_dir_tmp.mkdir(parents=True, exist_ok=True) self.unsharded_checkpoints.append(checkpoint_dir) barrier() # Flush data indices file. if self.indices_file is not None: self.indices_file.flush() # Write the checkpoint. with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig(rank0_only=True, offload_to_cpu=True), optim_state_dict_config=FullOptimStateDictConfig(rank0_only=True, offload_to_cpu=True), ): # We'll write the model and optimizer state dicts individually to reduce (CPU) memory consumption. # First the model state. model_state_dict = self.fsdp_model.state_dict() if get_global_rank() == 0: torch.save(model_state_dict, checkpoint_dir_tmp / "model.pt") del model_state_dict # Then the optimizer state. optim_state_dict = FSDP.optim_state_dict(self.fsdp_model, self.optim) if get_global_rank() == 0: torch.save(optim_state_dict, checkpoint_dir_tmp / "optim.pt") del optim_state_dict # Then everything else. other_state_dict = self.non_tensor_state_dict() if get_global_rank() == 0: torch.save(other_state_dict, checkpoint_dir_tmp / "other.pt") self.cfg.save(checkpoint_dir_tmp / "config.yaml") barrier() if get_global_rank() == 0: # Replace temp directory with target checkpoint directory. checkpoint_dir_tmp.replace(checkpoint_dir) # Link to 'latest'. 
latest_path = Path(self.cfg.save_folder) / "latest-unsharded" latest_path.unlink(missing_ok=True) latest_path.symlink_to(checkpoint_dir.name, target_is_directory=True) # In the cases where we're using a shared NFS drive between ranks to save checkpoints, # replacing the temp directory with the final directory from rank 0 might not be immediately # realized in the file systems of the other ranks. # So we wait here across all ranks until that final checkpoint directory is visible. wait_on(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0) # Remove old checkpoints. if self.cfg.save_num_unsharded_checkpoints_to_keep > 0: while len(self.unsharded_checkpoints) > self.cfg.save_num_unsharded_checkpoints_to_keep: self.remove_unsharded_checkpoint(0) barrier() # Upload checkpoint to bucket. if self.cfg.remote_save_folder is not None: if get_global_rank() == 0: for fname in ["config.yaml", "model.pt", "optim.pt", "other.pt"]: source = checkpoint_dir / fname target = f"{self.cfg.remote_save_folder}/{checkpoint_dir.name}/{fname}" log.info(f"Uploading {source} to {target}...") upload(source, target, save_overwrite=self.cfg.save_overwrite) barrier() return checkpoint_dir def remove_unsharded_checkpoint(self, idx: int = 0): barrier() oldest_checkpoint = self.unsharded_checkpoints.pop(idx) if get_global_rank() == 0 and oldest_checkpoint.is_dir(): shutil.rmtree(oldest_checkpoint, ignore_errors=True) latest_path = Path(self.cfg.save_folder) / "latest-unsharded" if latest_path.resolve() == oldest_checkpoint.resolve(): latest_path.unlink() barrier() def restore_unsharded_checkpoint(self, load_path: PathOrStr): # Zero-gradients to avoid gathering them. self.optim.zero_grad(set_to_none=True) with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig(rank0_only=True, offload_to_cpu=True), optim_state_dict_config=FullOptimStateDictConfig(rank0_only=True, offload_to_cpu=True), ): # Load model state. log.info("Loading model state...") self.fsdp_model.load_state_dict(torch.load(resource_path(load_path, "model.pt"))) # Load optimizer state. log.info("Loading optimizer state...") optim_state_dict = torch.load(resource_path(load_path, "optim.pt")) # NOTE: careful, the order of these arguments has changed since the 2.0 release. if version.parse(torch.__version__) < version.parse("2.1.0"): # flattened_osd = FSDP.optim_state_dict_to_load(optim_state["optim"], self.fsdp_model, self.optim) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(optim_state_dict, self.fsdp_model, self.optim) # type: ignore else: # flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, optim_state["optim"]) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, optim_state_dict) # type: ignore del optim_state_dict self.optim.load_state_dict(flattened_osd) del flattened_osd # Load other state. 
other_state_dict = torch.load(resource_path(load_path, "other.pt")) self.load_non_tensor_state_dict(other_state_dict) barrier() def save_checkpoint(self, checkpoint_type: CheckpointType = CheckpointType.sharded) -> Path: if checkpoint_type == CheckpointType.sharded: return self.save_sharded_checkpoint() elif checkpoint_type == CheckpointType.unsharded: return self.save_unsharded_checkpoint() else: raise NotImplementedError(checkpoint_type) def restore_checkpoint(self, load_path: PathOrStr, checkpoint_type: Optional[CheckpointType] = None): if checkpoint_type == CheckpointType.unsharded or ( checkpoint_type is None and str(load_path).endswith("-unsharded") ): self.restore_unsharded_checkpoint(load_path) elif checkpoint_type == CheckpointType.sharded or checkpoint_type is None: self.restore_sharded_checkpoint(load_path) elif checkpoint_type is not None: raise NotImplementedError(checkpoint_type) def remove_checkpoint(self, idx: int = 0, checkpoint_type: CheckpointType = CheckpointType.sharded): if checkpoint_type == CheckpointType.sharded: self.remove_sharded_checkpoint(idx=idx) elif checkpoint_type == CheckpointType.unsharded: self.remove_unsharded_checkpoint(idx=idx) else: raise NotImplementedError(checkpoint_type) def get_labels(self, batch: Dict[str, Any]) -> torch.Tensor: # Labels are just input IDs shifted to the left (first item is ignored). labels, attention_mask = batch["input_ids"], batch.get("attention_mask") if attention_mask is not None: labels = labels.masked_fill(attention_mask == 0.0, -100) return labels[..., 1:].contiguous() def model_forward( self, batch: Dict[str, Any], loss_reduction: str = "mean" ) -> Tuple[torch.Tensor, torch.Tensor]: # shape: (batch_size, seq_len, vocab_size) logits = self.fsdp_model( input_ids=batch["input_ids"], attention_mask=batch.get("attention_mask"), attention_bias=batch.get("attention_bias"), ).logits logits_for_loss = logits[..., :-1, :].contiguous() # shape: (batch_size * seq_len, vocab_size) logits_for_loss = logits_for_loss.view(-1, logits_for_loss.size(-1)) # shape: (batch_size, seq_len) labels = self.get_labels(batch) # shape: (batch_size * seq_len,) labels = labels.view(-1) ce_loss = F.cross_entropy(logits_for_loss, labels, ignore_index=-100, reduction=loss_reduction) if loss_reduction == "none": # Reshape (batch_size * seq_len,) -> (batch_size, seq_len) ce_loss = ce_loss.view(batch["input_ids"].shape[0], -1) return ce_loss, logits def train_batch(self, batch: Dict[str, Any]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Split into micro-batches. micro_batches = self.split_batch(batch) # In case this helps with memory utilization. del batch ce_batch_loss = torch.tensor(0.0, device=self.device) z_batch_loss = None if not self.cfg.softmax_auxiliary_loss else torch.tensor(0.0, device=self.device) for micro_batch in micro_batches: with torch.autocast("cuda", enabled=True, dtype=self.cfg.autocast_precision): # Run forward pass. ce_loss, logits = self.model_forward(micro_batch) ce_loss = ce_loss / len(micro_batches) # In case this helps with memory utilization. del micro_batch # Update overall CE batch loss. ce_batch_loss += ce_loss.detach() # Get loss to optimize for. if self.cfg.softmax_auxiliary_loss: z_squared = logits.logsumexp(-1).pow(2).mean() z_loss = 1e-4 * z_squared / len(micro_batches) loss = ce_loss + z_loss # Update overall Z batch loss. z_batch_loss += z_loss.detach() else: loss = ce_loss del logits # Check for nan. if torch.isnan(loss): raise ValueError("nan loss encountered") # Run backward pass. 
loss.backward() return ce_batch_loss, z_batch_loss def train_step(self, batch: Dict[str, Any]) -> Dict[str, float]: # Write data-indices to file. if self.indices_file is not None and "index" in batch: indices = "\t".join(str(int(i)) for i in batch["index"]) self.indices_file.write(f"{self.global_step}\t{indices}\n") # Zero-gradients. self.optim.zero_grad(set_to_none=True) # Reset metrics. self.ce_train_loss_metric.reset() if self.z_train_loss_metric is not None: self.z_train_loss_metric.reset() # Move tensors to the right device. batch = move_to_device(batch, self.device) # Run forward-backward pass. losses = [] if self.cfg.matformer_factor > 1: self.matmng = MatformerManager.get_instance() self.matmng.current_factor = 1 assert self.cfg.matformer_factor % 2 == 0 iters = int(math.log2(self.cfg.matformer_factor)) + 1 for i in range(iters): # Reduce loss metrics across ranks. ce_batch_loss, z_batch_loss = self.train_batch(batch) self.ce_train_loss_metric.update(ce_batch_loss) ce_batch_loss = self.ce_train_loss_metric.compute() losses.append((ce_batch_loss, z_batch_loss)) self.matmng.current_factor *= 2 else: ce_batch_loss, z_batch_loss = self.train_batch(batch) # Reduce loss metrics across ranks. self.ce_train_loss_metric.update(ce_batch_loss) ce_batch_loss = self.ce_train_loss_metric.compute() losses.append((ce_batch_loss, z_batch_loss)) # Clip gradient norms. grad_norm: Optional[float] = None if self.cfg.max_grad_norm is not None: grad_norm = self.fsdp_model.clip_grad_norm_(self.cfg.max_grad_norm).item() # Optimizer step. self.optim.step() self.scheduler.step() if len(losses) > 1: metrics = {} for i in range(len(losses)): factor = 2**i metrics[f'train/CrossEntropyLoss 1/{factor}'] = losses[i][0].item() metrics[f'train/Perplexity 1/{factor}'] = (2**(losses[i][0])).item() if z_batch_loss is not None and self.z_train_loss_metric is not None: self.z_train_loss_metric.update(z_batch_loss) z_batch_loss = self.z_train_loss_metric.compute() metrics[f"train/ZLoss 1/{factor}"] = z_batch_loss.item() else: metrics = { "train/CrossEntropyLoss": ce_batch_loss.item(), "train/Perplexity": torch.exp(ce_batch_loss).item(), } if z_batch_loss is not None and self.z_train_loss_metric is not None: self.z_train_loss_metric.update(z_batch_loss) z_batch_loss = self.z_train_loss_metric.compute() metrics["train/ZLoss"] = z_batch_loss.item() if grad_norm is not None: metrics["optim/grad_norm"] = grad_norm # Update min train loss and see if we should stop early. self.min_train_loss = min(self.min_train_loss, ce_batch_loss.item()) # type: ignore if ( self.cfg.early_stopping_factor is not None and self.global_step > self.cfg.scheduler.t_warmup and ce_batch_loss.item() > self.cfg.early_stopping_factor * self.min_train_loss ): raise ValueError("Stopping early because train loss has increased substantially") return metrics def eval_batch(self, batch: Dict[str, Any]) -> Tuple[torch.Tensor, torch.Tensor]: with torch.autocast("cuda", enabled=True, dtype=self.cfg.autocast_precision): ce_loss, logits = self.model_forward(batch, loss_reduction="none") return ce_loss.mean(dim=-1), logits def eval_step(self, batch: Dict[str, Any], evaluator: Evaluator) -> None: # Move tensors to the right device. batch = move_to_device(batch, self.device) # Run forward pass. with torch.no_grad(): # NOTE: 'torch.inference_mode()' doesn't work with 'torch.compile()'. ce_loss, logits = self.eval_batch(batch) # Update metrics. 
evaluator.update_metrics( batch, ce_loss, logits ) # batch includes all keys that the downstream evaluation needs barrier() def split_batch(self, batch: Dict[str, Any]) -> List[Dict[str, Any]]: microbatch_size = self.cfg.device_train_microbatch_size batch_size = batch["input_ids"].shape[0] if batch_size <= microbatch_size: return [batch] else: micro_batches = {} for key, value in batch.items(): if isinstance(value, torch.Tensor): micro_batches[key] = value.split(microbatch_size, dim=0) elif isinstance(value, list): micro_batches[key] = [ value[microbatch_size * i : microbatch_size * i + microbatch_size] for i in range(math.ceil(batch_size / microbatch_size)) ] else: raise ValueError(f"unexpected item in batch: '{key}={value}'") return [ {key: value[i] for key, value in micro_batches.items()} # type: ignore for i in range(len(micro_batches["input_ids"])) ] def system_metrics(self) -> Dict[str, float]: metrics = {} peak_gpu_mb = peak_gpu_memory() if peak_gpu_mb is not None: metrics["System/Peak GPU Memory (MB)"] = peak_gpu_mb return metrics def log_metrics_to_console(self, prefix: str, metrics: Dict[str, float]): def format_float(value: float) -> str: if value < 0.0001: return str(value) # scientific notation elif value > 1000: return f"{int(value):,d}" elif value > 100: return f"{value:.1f}" elif value > 10: return f"{value:.2f}" elif value > 1: return f"{value:.3f}" else: return f"{value:.4f}" log.info( f"{prefix}\n" + "\n".join([f" {name}={format_float(value)}" for name, value in metrics.items()]) ) def should_log_this_step(self) -> bool: if self.global_step % self.cfg.console_log_interval == 0: return True elif self.cfg.wandb is not None and self.global_step % self.cfg.wandb.log_interval == 0: return True else: return False def eval(self) -> Dict[str, Any]: # Zero gradients and set model to 'eval' mode. self.optim.zero_grad(set_to_none=True) self.fsdp_model.eval() eval_metrics = {} for evaluator in self.evaluators: log.info(f"Running evaluation for '{evaluator.label}'...") # Run model over batches. if self.cfg.matformer_factor > 1: self.matmng = MatformerManager.get_instance() self.matmng.current_factor = 1 assert self.cfg.matformer_factor % 2 == 0 iters = int(math.log2(self.cfg.matformer_factor)) + 1 for i in range(iters): # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label} 1/{self.matmng.current_factor}", metrics) self.matmng.current_factor *= 2 for m in metrics: eval_metrics.pop(m, None) else: # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. 
num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label}", metrics) del eval_batches return eval_metrics def fit(self): start_time = time.time() if self.cfg.load_path is not None and self.global_step > 0 and self.cfg.eval_on_load: eval_metrics = self.eval() if wandb.run is not None: wandb.log(eval_metrics, step=self.global_step) # Set model to 'train' mode. self.fsdp_model.train() # Initialize monitors. assert self.cfg.device_train_batch_size is not None speed_monitor = SpeedMonitor(self.cfg.speed_monitor) lr_monitor = LRMonitor(self.optim) # Log system metrics at the start of training. sys_metrics = self.system_metrics() if sys_metrics: self.log_metrics_to_console("Pre-train system metrics", sys_metrics) if wandb.run is not None: wandb.log(sys_metrics, step=0) # Train. first_batch: bool = True for batch in self.train_loader: # Bookkeeping. # NOTE: To track the global batch size / number of tokens per batch we make the assumption that all # batches see the same number of tokens, which should be the case for language model pre-training # (at least when drop_last=True). # Alternatively we'd have to use a distributed all reduce over seq_len here, but I don't want that overhead. # So for now I'm putting these assertions here so if the assumption is violated it will fail loudly. batch_size, seq_len = batch["input_ids"].shape assert seq_len == self.cfg.model.max_sequence_length assert batch_size == self.cfg.device_train_batch_size
global_batch_size = batch_size * get_world_size() # assumes batch size equal across ranks
12
2023-11-14 02:24:07+00:00
16k
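The trainer record above splits each device batch into micro-batches for gradient accumulation. A stripped-down sketch of that split_batch logic, handling tensor values only (the original also slices list-valued keys the same way):

import math
import torch

def split_batch(batch: dict, microbatch_size: int) -> list:
    # Split every tensor along dim 0, as in Trainer.split_batch() above.
    batch_size = batch["input_ids"].shape[0]
    if batch_size <= microbatch_size:
        return [batch]
    pieces = {
        k: v.split(microbatch_size, dim=0)
        for k, v in batch.items() if isinstance(v, torch.Tensor)
    }
    n = math.ceil(batch_size / microbatch_size)
    return [{k: v[i] for k, v in pieces.items()} for i in range(n)]

mb = split_batch({"input_ids": torch.zeros(10, 4, dtype=torch.long)}, 4)
print([m["input_ids"].shape[0] for m in mb])  # [4, 4, 2]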
1in-oos/ccplus
caringcaribou/tests/test_module_uds.py
[ { "identifier": "Constants", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n ...
from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, ServiceID, Services from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229 from caringcaribou.modules import uds import unittest
10,860
ARB_ID_RESPONSE = 0x300F # Timeout (in seconds) when waiting for response during bruteforce BRUTEFORCE_TIMEOUT = 0.01 def setUp(self): # Initialize mock ECU self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE) # Remove response delay self.ecu.DELAY_BEFORE_RESPONSE = 0.0 self.ecu.start_server() def tearDown(self): if isinstance(self.ecu, MockEcuIso14229): self.ecu.__exit__(None, None, None) def test_uds_discovery(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 blacklist = [] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_uds_discovery_blacklist(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 # Blacklist the arbitration ID used for response blacklist = [self.ARB_ID_RESPONSE] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) # No results expected due to blacklist expected_result = [] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery(self): # Service discovery arguments range_start = 0x09 range_end = 0x13 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # Supported services within specified range expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery_empty_range(self): # Service discovery arguments range_start = 0x00 range_end = 0x05 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # No services should be found within range expected_result = [] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format( result)) def test_ecu_reset_hard_reset_success(self): # ECU Reset arguments reset_type = Services.EcuReset.ResetType.HARD_RESET timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for successful request expected_response_id = Iso14229_1.get_service_response_id(Services.EcuReset.service_id) expected_result = [expected_response_id, reset_type] self.assertListEqual(result, expected_result, "ECU Reset gave '{0}', expected '{1}'".format( result, expected_result)) def 
test_ecu_reset_unsupported_reset_type_failure(self): # Invalid reset type reset_type = 0x00 timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for invalid request expected_response_id = Services.EcuReset.service_id
from __future__ import print_function class UdsModuleTestCase(unittest.TestCase): ARB_ID_REQUEST = 0x300E ARB_ID_RESPONSE = 0x300F # Timeout (in seconds) when waiting for response during bruteforce BRUTEFORCE_TIMEOUT = 0.01 def setUp(self): # Initialize mock ECU self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE) # Remove response delay self.ecu.DELAY_BEFORE_RESPONSE = 0.0 self.ecu.start_server() def tearDown(self): if isinstance(self.ecu, MockEcuIso14229): self.ecu.__exit__(None, None, None) def test_uds_discovery(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 blacklist = [] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_uds_discovery_blacklist(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 # Blacklist the arbitration ID used for response blacklist = [self.ARB_ID_RESPONSE] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) # No results expected due to blacklist expected_result = [] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery(self): # Service discovery arguments range_start = 0x09 range_end = 0x13 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # Supported services within specified range expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery_empty_range(self): # Service discovery arguments range_start = 0x00 range_end = 0x05 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # No services should be found within range expected_result = [] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format( result)) def test_ecu_reset_hard_reset_success(self): # ECU Reset arguments reset_type = Services.EcuReset.ResetType.HARD_RESET timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for successful request expected_response_id = Iso14229_1.get_service_response_id(Services.EcuReset.service_id) expected_result = [expected_response_id, reset_type] 
self.assertListEqual(result, expected_result, "ECU Reset gave '{0}', expected '{1}'".format( result, expected_result)) def test_ecu_reset_unsupported_reset_type_failure(self): # Invalid reset type reset_type = 0x00 timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for invalid request expected_response_id = Services.EcuReset.service_id
expected_nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED
2
2023-11-13 05:05:46+00:00
16k
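The UDS tests above lean on two ISO 14229-1 conventions: a positive response SID is the request SID plus 0x40, and a negative response is the frame [0x7F, request SID, NRC]. A tiny sketch of that arithmetic; the constant values below are the standard ISO ones, but in the tests they come from caringcaribou.utils.iso14229_1.

ECU_RESET = 0x11                      # ServiceID.ECU_RESET
HARD_RESET = 0x01                     # ResetType.HARD_RESET
SUB_FUNCTION_NOT_SUPPORTED = 0x12     # NRC expected by the failure test

def positive_response_id(service_id: int) -> int:
    # ISO 14229-1: positive response SID = request SID + 0x40
    return service_id + 0x40

positive = [positive_response_id(ECU_RESET), HARD_RESET]   # [0x51, 0x01]
negative = [0x7F, ECU_RESET, SUB_FUNCTION_NOT_SUPPORTED]   # negative-response frame
print([hex(b) for b in positive], [hex(b) for b in negative])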
L1bra1/WeakMotion
train_WeakMotionNet.py
[ { "identifier": "WeakMotionNet", "path": "weak_model.py", "snippet": "class WeakMotionNet(nn.Module):\n def __init__(self, out_seq_len=1, FGBG_category_num=2, height_feat_size=13):\n super(WeakMotionNet, self).__init__()\n self.out_seq_len = out_seq_len\n\n self.motion_pred = Mot...
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np import time import sys import argparse import os from shutil import copytree, copy from weak_model import WeakMotionNet from data.weak_nuscenes_dataloader import DatasetSingleSeq_Stage2 from data.weak_waymo_dataloader import DatasetSingleSeq_Stage2 as DatasetSingleSeq_Stage2_waymo from sklearn.metrics import confusion_matrix from tqdm import tqdm from loss_utils import FGBG_seg_loss, CCD_loss from evaluation_utils import evaluate_FGBG_prediction, evaluate_motion_prediction
11,245
Reference: MotionNet (https://www.merl.com/research/?research=license-request&sw=MotionNet) """ class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path out_seq_len = 1 # The number of future frames we are going to predict height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training') parser.add_argument('--batch', default=8, type=int, help='Batch size') parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs') parser.add_argument('--nworker', default=4, type=int, help='Number of workers') parser.add_argument('--log', default=True, action='store_true', help='Whether to log') parser.add_argument('--logpath', default='', help='The path to the output log file') parser.add_argument('--gpu', default='1') parser.add_argument('--annotation_ratio', default=0.01, type=float) args = parser.parse_args() print(args) num_epochs = args.nepoch need_log = args.log BATCH_SIZE = args.batch num_workers = args.nworker os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype annotation_ratio = args.annotation_ratio def main(): start_epoch = 1 # Whether to log the training information if need_log: logger_root = args.logpath if args.logpath != '' else 'logs' time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") if args.resume == '': model_save_path = check_folder(logger_root) model_save_path = check_folder(os.path.join(model_save_path, 'Stage2')) model_save_path = check_folder(os.path.join(model_save_path, time_stamp)) log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "w") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[0:]))) saver.write(args.__repr__() + "\n\n") saver.flush() else: model_save_path = args.resume log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "a") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") 
saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] != '' else tmp.split('/')[-2] if datatype == 'nuScenes':
""" Train WeakMotionNet in Stage2 Some of the code are modified based on 'train_single_seq.py' in MotionNet. Reference: MotionNet (https://www.merl.com/research/?research=license-request&sw=MotionNet) """ class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path out_seq_len = 1 # The number of future frames we are going to predict height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training') parser.add_argument('--batch', default=8, type=int, help='Batch size') parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs') parser.add_argument('--nworker', default=4, type=int, help='Number of workers') parser.add_argument('--log', default=True, action='store_true', help='Whether to log') parser.add_argument('--logpath', default='', help='The path to the output log file') parser.add_argument('--gpu', default='1') parser.add_argument('--annotation_ratio', default=0.01, type=float) args = parser.parse_args() print(args) num_epochs = args.nepoch need_log = args.log BATCH_SIZE = args.batch num_workers = args.nworker os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype annotation_ratio = args.annotation_ratio def main(): start_epoch = 1 # Whether to log the training information if need_log: logger_root = args.logpath if args.logpath != '' else 'logs' time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") if args.resume == '': model_save_path = check_folder(logger_root) model_save_path = check_folder(os.path.join(model_save_path, 'Stage2')) model_save_path = check_folder(os.path.join(model_save_path, time_stamp)) log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "w") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[0:]))) saver.write(args.__repr__() + "\n\n") saver.flush() else: model_save_path = args.resume log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "a") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment 
saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes':
trainset = DatasetSingleSeq_Stage2(dataset_root=args.motiondata, weakdata_root=args.weakdata,
2
2023-11-12 07:03:29+00:00
16k
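The AverageMeter in the record above keeps a running mean weighted by batch size. A short usage sketch, with the class trimmed to what update/avg need:

class AverageMeter:
    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0
    def update(self, val, n=1):
        # n is the batch size, so the average is weighted by sample count.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

meter = AverageMeter()
for val, n in [(0.9, 8), (0.7, 8), (0.5, 4)]:
    meter.update(val, n)
print(meter.val, meter.avg)  # 0.5 0.74 (14.8 total over 20 samples)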
c3exchange/c3-smartcontracts-v1
contracts_unified/core/main.py
[ { "identifier": "update", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef update() -> Expr:\n \"\"\"Implements the contract method called on update\"\"\"\n\n return sender_is_creator()" }, { "identifier": "delete", "path": "contracts_u...
from pyteal import ( BareCallActions, CallConfig, MethodConfig, OnCompleteAction, OptimizeOptions, Reject, Router, ) from contracts_unified.core.bare_calls import delete, update from contracts_unified.core.methods import ( account_move, add_order, clean_orders, create, deposit, fund_mbr, liquidate, pool_move, portal_transfer, settle, update_instrument, update_parameter, withdraw, wormhole_deposit, )
11,878
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()), delete_application=OnCompleteAction.always(delete()), ), clear_state=Reject(), ) CORE_ROUTER.add_method_handler( create, "create", MethodConfig(no_op=CallConfig.CREATE), "Create C3 Core contract", ) CORE_ROUTER.add_method_handler( update_instrument, "update_instrument", MethodConfig(no_op=CallConfig.CALL), "Add a new instrument (ASA) to the Core", ) CORE_ROUTER.add_method_handler( update_parameter, "update_parameter", MethodConfig(no_op=CallConfig.CALL), "Update a global parameter", ) CORE_ROUTER.add_method_handler( deposit, "deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account", ) CORE_ROUTER.add_method_handler( wormhole_deposit, "wormhole_deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account via Wormhole", ) CORE_ROUTER.add_method_handler( pool_move, "pool_move", MethodConfig(no_op=CallConfig.CALL), "Transfer instruments between user and pool", ) CORE_ROUTER.add_method_handler( add_order, "add_order", MethodConfig(no_op=CallConfig.CALL), "Add an order to the order book", ) CORE_ROUTER.add_method_handler( settle, "settle", MethodConfig(no_op=CallConfig.CALL), "Settle two orders" ) CORE_ROUTER.add_method_handler(
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()), delete_application=OnCompleteAction.always(delete()), ), clear_state=Reject(), ) CORE_ROUTER.add_method_handler( create, "create", MethodConfig(no_op=CallConfig.CREATE), "Create C3 Core contract", ) CORE_ROUTER.add_method_handler( update_instrument, "update_instrument", MethodConfig(no_op=CallConfig.CALL), "Add a new instrument (ASA) to the Core", ) CORE_ROUTER.add_method_handler( update_parameter, "update_parameter", MethodConfig(no_op=CallConfig.CALL), "Update a global parameter", ) CORE_ROUTER.add_method_handler( deposit, "deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account", ) CORE_ROUTER.add_method_handler( wormhole_deposit, "wormhole_deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account via Wormhole", ) CORE_ROUTER.add_method_handler( pool_move, "pool_move", MethodConfig(no_op=CallConfig.CALL), "Transfer instruments between user and pool", ) CORE_ROUTER.add_method_handler( add_order, "add_order", MethodConfig(no_op=CallConfig.CALL), "Add an order to the order book", ) CORE_ROUTER.add_method_handler( settle, "settle", MethodConfig(no_op=CallConfig.CALL), "Settle two orders" ) CORE_ROUTER.add_method_handler(
withdraw,
14
2023-11-17 20:54:15+00:00
16k
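The router record above imports OptimizeOptions but the shown slice never uses it; presumably it feeds Router.compile_program. A hedged, standalone sketch of the same registration-then-compile flow, with an illustrative method body and TEAL version; the PyTeal calls shown are the documented Router API, but version details may differ.

from pyteal import (
    ABIReturnSubroutine, Approve, BareCallActions, CallConfig, Expr,
    MethodConfig, OnCompleteAction, OptimizeOptions, Reject, Router, abi,
)

@ABIReturnSubroutine
def ping(*, output: abi.Uint64) -> Expr:
    return output.set(1)   # trivial health-check method, illustrative only

demo_router = Router(
    "Demo",
    BareCallActions(no_op=OnCompleteAction.create_only(Approve())),
    clear_state=Reject(),
)
demo_router.add_method_handler(ping, "ping", MethodConfig(no_op=CallConfig.CALL), "Health check")
approval, clear, contract = demo_router.compile_program(
    version=8, optimize=OptimizeOptions(scratch_slots=True)
)
print(contract.dictify()["name"])  # "Demo"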
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/db/ark_dpa_db_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_...
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy from ark_sdk_python.models.common import ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.db import ( ArkDPADB, ArkDPADBAddPolicy, ArkDPADBAppliedTo, ArkDPADBAuthorizationRule, ArkDPADBBaseAuth, ArkDPADBConnectAs, ArkDPADBConnectionInformation, ArkDPADBLDAPAuth, ArkDPADBLocalDBAuth, ArkDPADBMariaDB, ArkDPADBMSSQL, ArkDPADBMySQL, ArkDPADBOracle, ArkDPADBOracleDBAuth, ArkDPADBOracleResource, ArkDPADBPolicy, ArkDPADBPolicyListItem, ArkDPADBPostgres, ArkDPADBProvidersData, ArkDPADBResourceIdentifierType, ArkDPADBUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.db.ark_dpa_db_policies_service import ArkDPADBPoliciesService import inquirer
13,906
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-db-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) SUPPORTED_DATABASE_TYPES: Final[List[str]] = [ 'MSSQL', 'MySQL', 'MariaDB', 'Postgres', 'Oracle', ] DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy( policy_name='Default DB Policy', status=ArkDPARuleStatus.Draft, description='Auto generated db policy', providers_data=ArkDPADBProvidersData( postgres=ArkDPADBPostgres( resources=['postgres-onboarded-asset'], ), ), start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], )
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-db-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) SUPPORTED_DATABASE_TYPES: Final[List[str]] = [ 'MSSQL', 'MySQL', 'MariaDB', 'Postgres', 'Oracle', ] DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy( policy_name='Default DB Policy', status=ArkDPARuleStatus.Draft, description='Auto generated db policy', providers_data=ArkDPADBProvidersData( postgres=ArkDPADBPostgres( resources=['postgres-onboarded-asset'], ), ), start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], )
DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPADB]] = {
5
2023-11-13 09:24:31+00:00
16k
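The one-week validity window in DEFAULT_GENERATED_POLICY above is plain standard-library date arithmetic; a standalone sketch of just that computation:

from datetime import date, timedelta

start_date = date.today().strftime('%Y-%m-%d')                      # policy valid from today
end_date = (date.today() + timedelta(days=7)).strftime('%Y-%m-%d')  # expires one week later
print(start_date, end_date)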
mohenghui/detectAuto_v8
ultralytics/models/sam/model.py
[ { "identifier": "Model", "path": "ultralytics/engine/model.py", "snippet": "class Model(nn.Module):\n \"\"\"\n A base class to unify APIs for all models.\n\n Args:\n model (str, Path): Path to the model file to load or create.\n task (Any, optional): Task type for the YOLO model. ...
from pathlib import Path from ultralytics.engine.model import Model from ultralytics.utils.torch_utils import model_info from .build import build_sam from .predict import Predictor
11,471
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """ self.model = build_sam(weights) def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Performs segmentation prediction on the given image or video source. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024) kwargs.update(overrides) prompts = dict(bboxes=bboxes, points=points, labels=labels) return super().predict(source, stream, prompts=prompts, **kwargs) def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Alias for the 'predict' method. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ return self.predict(source, stream, bboxes, points, labels, **kwargs) def info(self, detailed=False, verbose=True): """ Logs information about the SAM model. Args: detailed (bool, optional): If True, displays detailed information about the model. Defaults to False. verbose (bool, optional): If True, displays information on the console. Defaults to True. Returns: (tuple): A tuple containing the model's information. """
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """ self.model = build_sam(weights) def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Performs segmentation prediction on the given image or video source. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024) kwargs.update(overrides) prompts = dict(bboxes=bboxes, points=points, labels=labels) return super().predict(source, stream, prompts=prompts, **kwargs) def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Alias for the 'predict' method. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ return self.predict(source, stream, bboxes, points, labels, **kwargs) def info(self, detailed=False, verbose=True): """ Logs information about the SAM model. Args: detailed (bool, optional): If True, displays detailed information about the model. Defaults to False. verbose (bool, optional): If True, displays information on the console. Defaults to True. Returns: (tuple): A tuple containing the model's information. """
return model_info(self.model, detailed=detailed, verbose=verbose)
1
2023-11-16 12:49:59+00:00
16k
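A hedged usage sketch for the SAM interface defined above; it assumes the public ultralytics package export, a locally available 'sam_b.pt' checkpoint, and a hypothetical 'image.jpg':

from ultralytics import SAM

model = SAM('sam_b.pt')   # routed through _load -> build_sam as shown above
model.info(verbose=True)  # logs a model summary via model_info
# Prompted segmentation with one bounding box given as (x1, y1, x2, y2).
results = model.predict('image.jpg', bboxes=[[100, 100, 400, 400]])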
Aues6uen11Z/Zafkiel
tests/test.py
[ { "identifier": "logger", "path": "zafkiel/logger.py", "snippet": "" }, { "identifier": "Config", "path": "zafkiel/config.py", "snippet": "class Config:\n ST = Settings\n ST.CVSTRATEGY = [\"mstpl\", \"sift\"]\n ST.THRESHOLD = 0.8\n\n GAME_PATH = None\n SERVER_LANG = 'cn'\n...
from zafkiel import API, Template, logger, Timer, simple_report, Config from zafkiel.ocr import Keyword, Ocr, Digit, DigitCounter, Duration, OcrResultButton from zafkiel.ui import Page, Switch, UI
12,426
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch
UI
14
2023-11-12 09:33:35+00:00
16k
doodledood/chat-flock
chatflock/use_cases/bshr.py
[ { "identifier": "InMemoryChatDataBackingStore", "path": "chatflock/backing_stores/in_memory.py", "snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __in...
from typing import Any, Dict, Generator, Generic, List, Optional, Type, TypeVar from functools import partial from halo import Halo from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.chat_models.base import BaseChatModel from langchain.llms.openai import OpenAI from langchain.memory import ConversationSummaryBufferMemory from langchain.tools import BaseTool from pydantic import BaseModel, Field from chatflock.backing_stores import InMemoryChatDataBackingStore from chatflock.backing_stores.langchain import LangChainMemoryBasedChatDataBackingStore from chatflock.base import Chat, ChatDataBackingStore from chatflock.conductors import RoundRobinChatConductor from chatflock.parsing_utils import chat_messages_to_pydantic from chatflock.participants.langchain import LangChainBasedAIChatParticipant from chatflock.participants.user import UserChatParticipant from chatflock.renderers import TerminalChatRenderer from chatflock.sequencial_process import SequentialProcess, Step from chatflock.structured_string import Section, StructuredString from chatflock.use_cases.request_response import get_response from chatflock.web_research import WebSearch from chatflock.web_research.web_research import WebResearchTool import datetime import json import questionary
11,236
feedback: str = Field( description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty." ) is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.") def generate_queries( state: BHSRState, chat_model: BaseChatModel, interactive_user: bool = True, max_queries: int = 5, shared_sections: Optional[List[Section]] = None, web_search_tool: Optional[BaseTool] = None, spinner: Optional[Halo] = None, ) -> None: if state.queries_to_run is not None and len(state.queries_to_run) > 0: # Means we are continuing a previous session return if shared_sections is None: shared_sections = [] query_generator = LangChainBasedAIChatParticipant( name="Search Query Generator", role="Search Query Generator", personal_mission="You will be given a specific query or problem by the user and you are to generate a list of " f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you " f"generate comprehensive, counterfactual, and maximally orthogonal search queries. " "Employ everything you know about " "information foraging and information literacy to generate the best possible questions. " "Use a step-by-step approach and think about the information need and the information " "domain before generating the queries. Order the queries by their importance and relevance " "to the main information need of the user.", other_prompt_sections=shared_sections + [ Section( name="Unclear Information Need", text=( "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or ask the user for clarification." if interactive_user else "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or make a best guess. The user will not be available to " "respond back." ), ), Section( name="Refine Queries", text='You might be given a first-pass information need with "None" previous queries and answers, ' "in which case you will do the best you" 'can to generate "naive queries" (uninformed search queries). However the USER might also ' "give you previous search queries or other background information such as accumulated notes. " 'If these materials are present, you are to generate "informed queries" - more specific ' "search queries that aim to zero in on the correct information domain. Do not duplicate " "previously asked questions. Use the notes and other information presented to create " "targeted queries and/or to cast a wider net.", ), Section( name="Termination", text="Once you generate a new set of queries to run, you should terminate the chat immediately by " "ending your message with TERMINATE", ), ], tools=[web_search_tool] if web_search_tool is not None else None, ignore_group_chat_environment=True, chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant() participants = [user, query_generator] try: memory = ConversationSummaryBufferMemory( llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore ) backing_store: ChatDataBackingStore = LangChainMemoryBasedChatDataBackingStore(memory=memory) except ValueError: backing_store = InMemoryChatDataBackingStore() chat = Chat( backing_store=backing_store, renderer=TerminalChatRenderer(), initial_participants=participants, max_total_messages=None if interactive_user else 2, ) chat_conductor = RoundRobinChatConductor() if state.information_need is None: if spinner is not None: spinner.stop() _ = chat_conductor.initiate_dialog( chat=chat, initial_message=f"What is your information need or query?", from_participant=query_generator ) else: _ = chat_conductor.initiate_dialog( chat=chat, initial_message=str( StructuredString( sections=[ Section(name="Information Need", text=state.information_need), Section( name="Previous Queries & Answers", text="None" if state.answers_to_queries is None or len(state.answers_to_queries) == 0 else None, sub_sections=[ Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False) for query, answer in (state.answers_to_queries or {}).items() ], ), Section(name="Current Hypothesis", text=str(state.current_hypothesis)), ] ) ), from_participant=user, )
# Based directly on David Shaprio's BSHR Loop: https://github.com/daveshap/BSHR_Loop class BHSRState(BaseModel): information_need: Optional[str] = None queries_to_run: Optional[List[str]] = None answers_to_queries: Optional[Dict[str, str]] = None current_hypothesis: Optional[str] = None proposed_hypothesis: Optional[str] = None feedback: Optional[str] = None is_satisficed: Optional[bool] = None def save_state(state: BHSRState, state_file: Optional[str]) -> None: if state_file is None: return data = state.model_dump() with open(state_file, "w") as f: json.dump(data, f, indent=2) def load_state(state_file: Optional[str]) -> Optional[BHSRState]: if state_file is None: return None try: with open(state_file) as f: data = json.load(f) return BHSRState.model_validate(data) except FileNotFoundError: return None class QueryGenerationResult(BaseModel): information_need: str = Field(description="Information need as requested by the user.") queries: List[str] = Field(description="Set of queries to run.") class HypothesisGenerationResult(BaseModel): hypothesis: str = Field( description="A new or updated hypothesis based on the materials provided. Rich formatting using Markdown. Should include all relevant citations inline." ) class SatisficationCheckResult(BaseModel): feedback: str = Field( description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty." ) is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.") def generate_queries( state: BHSRState, chat_model: BaseChatModel, interactive_user: bool = True, max_queries: int = 5, shared_sections: Optional[List[Section]] = None, web_search_tool: Optional[BaseTool] = None, spinner: Optional[Halo] = None, ) -> None: if state.queries_to_run is not None and len(state.queries_to_run) > 0: # Means we are continuing a previous session return if shared_sections is None: shared_sections = [] query_generator = LangChainBasedAIChatParticipant( name="Search Query Generator", role="Search Query Generator", personal_mission="You will be given a specific query or problem by the user and you are to generate a list of " f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you " f"generate comprehensive, counterfactual, and maximally orthogonal search queries. " "Employ everything you know about " "information foraging and information literacy to generate the best possible questions. " "Use a step-by-step approach and think about the information need and the information " "domain before generating the queries. Order the queries by their importance and relevance " "to the main information need of the user.", other_prompt_sections=shared_sections + [ Section( name="Unclear Information Need", text=( "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or ask the user for clarification." if interactive_user else "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or make a best guess. The user will not be available to " "respond back." ), ), Section( name="Refine Queries", text='You might be given a first-pass information need with "None" previous queries and answers, ' "in which case you will do the best you" 'can to generate "naive queries" (uninformed search queries). However the USER might also ' "give you previous search queries or other background information such as accumulated notes. " 'If these materials are present, you are to generate "informed queries" - more specific ' "search queries that aim to zero in on the correct information domain. Do not duplicate " "previously asked questions. Use the notes and other information presented to create " "targeted queries and/or to cast a wider net.", ), Section( name="Termination", text="Once you generate a new set of queries to run, you should terminate the chat immediately by " "ending your message with TERMINATE", ), ], tools=[web_search_tool] if web_search_tool is not None else None, ignore_group_chat_environment=True, chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant() participants = [user, query_generator] try: memory = ConversationSummaryBufferMemory( llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore ) backing_store: ChatDataBackingStore = LangChainMemoryBasedChatDataBackingStore(memory=memory) except ValueError: backing_store = InMemoryChatDataBackingStore() chat = Chat( backing_store=backing_store, renderer=TerminalChatRenderer(), initial_participants=participants, max_total_messages=None if interactive_user else 2, ) chat_conductor = RoundRobinChatConductor() if state.information_need is None: if spinner is not None: spinner.stop() _ = chat_conductor.initiate_dialog( chat=chat, initial_message=f"What is your information need or query?", from_participant=query_generator ) else: _ = chat_conductor.initiate_dialog( chat=chat, initial_message=str( StructuredString( sections=[ Section(name="Information Need", text=state.information_need), Section( name="Previous Queries & Answers", text="None" if state.answers_to_queries is None or len(state.answers_to_queries) == 0 else None, sub_sections=[ Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False) for query, answer in (state.answers_to_queries or {}).items() ], ), Section(name="Current Hypothesis", text=str(state.current_hypothesis)), ] ) ), from_participant=user, )
output = chat_messages_to_pydantic(
5
2023-11-12 11:10:58+00:00
16k
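The save_state/load_state pair above is a plain pydantic-v2 JSON round trip (model_dump out, model_validate back in). A minimal sketch with a cut-down stand-in model; the State class and field values here are hypothetical:

import json
from typing import List, Optional

from pydantic import BaseModel


class State(BaseModel):
    information_need: Optional[str] = None
    queries_to_run: Optional[List[str]] = None


state = State(information_need="example need", queries_to_run=["q1", "q2"])
with open("state.json", "w") as f:
    json.dump(state.model_dump(), f, indent=2)     # mirrors save_state
with open("state.json") as f:
    restored = State.model_validate(json.load(f))  # mirrors load_state
assert restored == state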
atlantic-quantum/Shipyard
shipyard/passes/interpreter.py
[ { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n ...
import functools import operator import numpy as np from contextlib import contextmanager from openpulse import ast from ..call_stack import ActivationRecord, ARType, CallStack from ..compiler_error import Error, ErrorCode, SemanticError from ..logger import LOGGER from ..mangle import Mangler from ..setup.internal import Frame, SetupInternal from ..visitors import GenericVisitor as QASMVisitor
12,825
def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None: """ CalibrationDefinition (defcal) node visitor: Saves defcal defintions to self.defcal_nodes dictionary with a mangled name. These mangled names are also saved to a list of defcal names (self.defcal_names) Args: node (ast.CalibrationDefinition): defcal node to visit """ mangled_name = Mangler(node).signature().mangle() self.defcal_names.append(mangled_name) self.defcal_nodes[mangled_name] = node @_maybe_annotated def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None: """ CalibrationStatement node visitor: Evaluates each line in a calibration block. Updates the self.calibration_scope dictionary which maintains a dictionary of values/variables in calibration scope. Args: node (ast.CalibrationStatement): openQASM CalibrationStatement AST node """ curr_nesting = self.call_stack.peek().nesting_level outer_activation_record = ActivationRecord( name="outer_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 1, ) outer_activation_record.members = self.calibration_scope with self.ar_context_manager(outer_activation_record): inner_activation_record = ActivationRecord( name="new_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 2, ) with self.ar_context_manager(inner_activation_record): for statement in node.body: self.visit(statement) self.calibration_scope.update(self.call_stack.peek().members) def visit_QuantumArgument(self, node: ast.QuantumArgument) -> None: """Raises error""" self.visit(node.name) @_maybe_annotated def visit_BreakStatement(self, node: ast.BreakStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_ContinueStatement(self, node: ast.ContinueStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_EndStatement(self, node: ast.EndStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_WhileLoop(self, node: ast.WhileLoop) -> None: """ WhileLoop node visitor: Prints out a while loop in SEQC format (which happens to be identical to openQASM format) All the statements in the block of the while loop are visited Example: qasm: while (int i < 10) {...; i=i+1;} -> seqc: while (cvar i < 10) {...; i=i+1;} Args: node (ast.WhileLoop): openQASM WhileLoop AST node context (PrinterState): state of the printer (e.g. indentation) """ if not self.visit_loops: return activation_record = ActivationRecord( name="while loop", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): # todo break if while_condition is just True (i.e. infiinite loop) while self.visit(node.while_condition): for statement in node.block: self.visit(statement) @_maybe_annotated def visit_ForInLoop(self, node: ast.ForInLoop) -> None: """ ForInLoop node visitor: Evaluates iteration range of for loop and then evaluates the body of the for loop for each iteration. Args: node (ast.ForInLoop): openQASM ForInLoop AST node Raises: Error: ErrorCode.UNHANDLED If the SET iterated over by the ForInLoop is incorrectly defined or not created using a RangeDefinition """ if not self.visit_loops: return name = node.identifier.name activation_record = ActivationRecord( name=f"for_loop_{self.call_stack.nesting_level+1}", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): start, end, step = self.visit(node.set_declaration) if end is None:
# pylint: disable=C0302 # Too many lines in module """ Interpreter: Class for evaluating OpenQASM ASTs """ # pylint: disable=W0221,R0904 def _maybe_annotated(method): # pragma: no cover @functools.wraps(method) def annotated(self: "Interpreter", node: ast.Statement) -> None: for annotation in node.annotations: self.visit(annotation) return method(self, node) return annotated # redefine IndexElement as it is not accessible from the openqasm3.ast IndexElement = ast.DiscreteSet | list[ast.Expression | ast.RangeDefinition] class Interpreter(QASMVisitor): """AST-visitor for evaluating OpenQASM code. Class maintains a call stack of activation records, which hold variable/literals information. Also maintains record of external functions, subroutines, and quantum gates. If subclassing, generally only the specialised ``visit_*`` methods need to be overridden. These are derived from the base class, and use the name of the relevant :mod:`AST node <.ast>` verbatim after ``visit_``. Based on the openQASM3 Printer""" def __init__( self, setup: SetupInternal = None, external_funcs: dict = None, visit_loops: bool = True, ): self.call_stack = CallStack() self.setup = setup self.external_funcs = external_funcs self.calibration_scope = {} self.defcal_nodes = {} self.defcal_names = [] self.subroutines = {} self.visit_loops = visit_loops def visit_Program(self, node: ast.Program) -> None: activation_record = ActivationRecord( name="main", ar_type=ARType.PROGRAM, nesting_level=1 ) with self.ar_context_manager(activation_record): for statement in node.statements: self.visit(statement) @_maybe_annotated def visit_Include(self, node: ast.Include) -> None: """Include statements should be resolved at this point""" raise self.compile_out(node) @_maybe_annotated def visit_QubitDeclaration(self, node: ast.QubitDeclaration) -> None: """Qubit declarations not supported""" activation_record = self.call_stack.peek() if node.size is not None: size = self.visit(node.size) activation_record[node.qubit.name] = [f"${x}" for x in range(size)] def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition) -> None: """Add subroutine to subroutines dict""" self.subroutines[node.name.name] = node @_maybe_annotated def visit_QuantumGateDefinition(self, node: ast.QuantumGateDefinition) -> None: """Not supporting quantum gate definitions""" raise self.compile_out(node) @_maybe_annotated def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None: """Pass over extern declarations""" def visit_Identifier(self, node: ast.Identifier) -> None: """Return the value associated with a given identifier""" try: activation_record = self.call_stack.down_stack(node.name) return activation_record[node.name] except KeyError as exc: raise SemanticError( ErrorCode.ID_NOT_FOUND, f"Identifier: {node.name} not found in call stack", ) from exc def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> bool: """Return the value of a boolean literal""" return node.value def visit_BinaryExpression(self, node: ast.BinaryExpression) -> None: """Evaluate and return the binary expression""" left = self.visit(node.lhs) right = self.visit(node.rhs) op = node.op return binary_ops[op.value](left, right) def visit_UnaryExpression(self, node: ast.UnaryExpression) -> None: """Evaluate and return the unary expression""" op = node.op return unary_ops[op.value](self.visit(node.expression)) def visit_FloatLiteral(self, node: ast.FloatLiteral) -> None: """Return the value of a float literal""" return node.value def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> None: """Return the value of an imaginary literal""" return complex(0, node.value) def visit_DurationLiteral(self, node: ast.DurationLiteral) -> None: """Return the value of a duration literal""" return node.value def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> None: """Return the value of an integer literal""" return node.value def visit_ArrayLiteral(self, node: ast.ArrayLiteral) -> None: """Return the value of an array literal""" return np.array([self.visit(val) for val in node.values]) def visit_IndexExpression(self, node: ast.IndexExpression) -> None: """Return the value of an index expression. Assumes the IndexExpression is a discrete set (ex. arr[{0, 1, 2}]), range (ex. arr[0:3:1]), or list of expressions (ex. arr[0:2, 4])""" activation_record = self.call_stack.down_stack(node.collection.name) if isinstance(node.index, ast.DiscreteSet): return activation_record[node.collection.name][self.visit(node.index)] if isinstance(node.index, ast.RangeDefinition): start, end, step = self.visit(node.index) return activation_record[node.collection.name][start:end:step] # assume list of expressions indices = [self.visit(index) for index in node.index] return activation_record[node.collection.name][indices] def visit_ReturnStatement(self, node: ast.ReturnStatement) -> None: """Return the value of a return statement""" return self.visit(node.expression) def visit_Concatenation(self, node: ast.Concatenation) -> None: """ Concatenation node visitor: joins elements in OpenQASM concatenation statement example: qasm: 'a ++ b ++ c;' Args: node (ast.Concatenation): openQASM concatenation AST node """ return np.concatenate([self.visit(node.lhs), self.visit(node.rhs)]) def quantum_gate_helper( self, node: ast.QuantumMeasurementStatement | ast.QuantumReset | ast.QuantumGate ) -> None: """ Helper function for QuantumGate, QuantumMeasurementStatement, and QuantumReset. Puts the calibration dictionary onto the stack and then adds a new activation record for the quantum gate, measurement, or reset. In the case of a QuantumGate, the function first adds the arguments to the activation record, then the statements in the measurement, reset, or gate body are visited. """ curr_nesting = self.call_stack.peek().nesting_level outer_activation_record = ActivationRecord( name="calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 1, ) outer_activation_record.members = self.calibration_scope with self.ar_context_manager(outer_activation_record): inner_activation_record = ActivationRecord( name="defcal", ar_type=ARType.DEFCAL, nesting_level=curr_nesting + 2 ) with self.ar_context_manager(inner_activation_record): signature = Mangler(node).signature() mangled_name = signature.match(self.defcal_names)[0] if isinstance(node, ast.QuantumGate): if node.modifiers: raise self.compile_out(node.modifiers) args = [self.visit(arg) for arg in node.arguments] node = self.defcal_nodes[mangled_name] inner_activation_record = self.call_stack.peek() for arg, val in zip( node.arguments, args ): # ignores Integer arguments if isinstance(arg, ast.ClassicalArgument): inner_activation_record[arg.name.name] = val for statement in self.defcal_nodes[mangled_name].body: if isinstance(statement, ast.ReturnStatement): returnval = self.visit(statement) return returnval self.visit(statement) @_maybe_annotated def visit_QuantumGate(self, node: ast.QuantumGate) -> None: """ QuantumGate node visitor: Visits and evaluates quantum gate call, at this point the gate operation should have a calibration definition (defcal). Example: qasm: defcal x90 $0 {...} >>x90 $0; -> ^^^^^^^ Args: node (ast.QuantumGate): openQASM QuantumGate AST node Optionally returns elements based on gate definition """ self.quantum_gate_helper(node) @_maybe_annotated def visit_QuantumMeasurementStatement( self, node: ast.QuantumMeasurementStatement ) -> None: """ QuantumMeasurementStatement node visitor: Visits and evaluates quantum measurement call, at this point the quantum measurement statement should have a calibration definition (defcal) Example: qasm: defcal measure $0 -> bit {...} >>b1 = measure $0; -> ^^^^^^^^^^^ Args: node (ast.QuantumMeasurementStatement): openQASM QuantumMeasurementStatement AST node Optionally allows for returns based on quantum measurement definition (gate definition) """ match node.target: case ast.Identifier(): name = node.target.name activation_record = self.call_stack.down_stack(name) activation_record[name] = self.quantum_gate_helper(node) case ast.IndexedIdentifier(): activation_record = self.call_stack.down_stack(node.target.name.name) activation_record[node.target.name.name][ [self.visit(index) for index in node.target.indices[0]] ] = self.quantum_gate_helper(node) case _: self.quantum_gate_helper(node) @_maybe_annotated def visit_QuantumReset(self, node: ast.QuantumReset) -> None: """ QuantumReset node visitor: Visits and evaluates quantum reset call, at this point the quantum reset should have a calibration definition (defcal) Example: qasm: defcal reset $0 {...} >>reset $0; -> ^^^^^^^^^ Args: node (ast.QuantumReset): openQASM QuantumReset AST node """ self.quantum_gate_helper(node) def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement) -> None: """ QuantumMeasurement node visitor: Visits and evaluates quantum measurement call, at this point the quantum measurement statement should have a calibration definition (defcal). Differs from QuantumMeasurementStatement in that it does not allow for returns Example: qasm: defcal measure $0 -> bit {...} >>measure $0; ^^^^^^^^^^^ Args: node (ast.QuantumMeasurement): openQASM QuantumMeasurement AST node Optionally allows for returns based on quantum measurement definition (gate definition) """ self.quantum_gate_helper(node) def visit_ExternArgument(self, node: ast.ExternArgument) -> None: """Passes extern argument call""" def visit_DiscreteSet(self, node: ast.DiscreteSet) -> None: """Returns a set of discrete values""" discrete_set = [] for i in node.values: discrete_set.append(self.visit(i)) return set(discrete_set) def visit_RangeDefinition(self, node: ast.RangeDefinition) -> None: """Returns tuple of (start,end,step) or default values""" start = self.visit(node.start) if node.start else 0 end = self.visit(node.end) if node.end else None step = self.visit(node.step) if node.step else 1 return (start, end, step) def visit_ExpressionStatement(self, node: ast.ExpressionStatement) -> None: """Visits expression statement""" return self.visit(node.expression) def generic_visit(self, node: ast.QASMNode) -> None: LOGGER.debug("Generic visit: %s", node) @_maybe_annotated def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None: """Saves classical declaration to activation record""" activation_record = self.call_stack.peek() match node: case ast.ClassicalDeclaration(type=ast.PortType()): name = node.identifier.name # activation_record = self.call_stack.peek() activation_record[name] = self.setup.ports[name] case ast.ClassicalDeclaration( type=ast.FrameType(), init_expression=ast.FunctionCall(name=ast.Identifier("newframe")), ): call = node.init_expression assert isinstance(call, ast.FunctionCall) assert len(call.arguments) == 3 port = call.arguments[0].name frequency = self.visit(call.arguments[1]) phase = self.visit(call.arguments[2]) frame = Frame( name=node.identifier.name, port=activation_record[port], frequency=frequency, phase=phase, ) activation_record[frame.name] = frame case ast.ClassicalDeclaration(type=ast.ArrayType()): if node.init_expression is None: shapes = [self.visit(dim) for dim in node.type.dimensions] activation_record[node.identifier.name] = np.zeros(shape=shapes) else: activation_record[node.identifier.name] = self.visit( node.init_expression ) case ast.ClassicalDeclaration(type=ast.BitType()): if node.init_expression is None: size = self.visit(node.type.size) or 1 activation_record[node.identifier.name] = np.zeros(shape=size) else: activation_record[node.identifier.name] = self.visit( node.init_expression ) case ast.ClassicalDeclaration(type=ast.WaveformType()): if node.init_expression is None: activation_record[node.identifier.name] = None else: activation_record[node.identifier.name] = self.visit( node.init_expression ) case _: if node.init_expression is not None: activation_record[node.identifier.name] = self.visit( node.init_expression ) else: activation_record[node.identifier.name] = None @_maybe_annotated def visit_IODeclaration(self, node: ast.IODeclaration) -> None: """IO Declaration should be resolved""" raise self.compile_out(node) @_maybe_annotated def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration) -> None: """Saves constant declaration to activation record""" activation_record = self.call_stack.peek() activation_record[node.identifier.name] = self.visit(node.init_expression) @_maybe_annotated def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None: """ CalibrationDefinition (defcal) node visitor: Saves defcal defintions to self.defcal_nodes dictionary with a mangled name. These mangled names are also saved to a list of defcal names (self.defcal_names) Args: node (ast.CalibrationDefinition): defcal node to visit """ mangled_name = Mangler(node).signature().mangle() self.defcal_names.append(mangled_name) self.defcal_nodes[mangled_name] = node @_maybe_annotated def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None: """ CalibrationStatement node visitor: Evaluates each line in a calibration block. Updates the self.calibration_scope dictionary which maintains a dictionary of values/variables in calibration scope. Args: node (ast.CalibrationStatement): openQASM CalibrationStatement AST node """ curr_nesting = self.call_stack.peek().nesting_level outer_activation_record = ActivationRecord( name="outer_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 1, ) outer_activation_record.members = self.calibration_scope with self.ar_context_manager(outer_activation_record): inner_activation_record = ActivationRecord( name="new_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 2, ) with self.ar_context_manager(inner_activation_record): for statement in node.body: self.visit(statement) self.calibration_scope.update(self.call_stack.peek().members) def visit_QuantumArgument(self, node: ast.QuantumArgument) -> None: """Raises error""" self.visit(node.name) @_maybe_annotated def visit_BreakStatement(self, node: ast.BreakStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_ContinueStatement(self, node: ast.ContinueStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_EndStatement(self, node: ast.EndStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_WhileLoop(self, node: ast.WhileLoop) -> None: """ WhileLoop node visitor: Prints out a while loop in SEQC format (which happens to be identical to openQASM format) All the statements in the block of the while loop are visited Example: qasm: while (int i < 10) {...; i=i+1;} -> seqc: while (cvar i < 10) {...; i=i+1;} Args: node (ast.WhileLoop): openQASM WhileLoop AST node context (PrinterState): state of the printer (e.g. indentation) """ if not self.visit_loops: return activation_record = ActivationRecord( name="while loop", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): # todo break if while_condition is just True (i.e. infiinite loop) while self.visit(node.while_condition): for statement in node.block: self.visit(statement) @_maybe_annotated def visit_ForInLoop(self, node: ast.ForInLoop) -> None: """ ForInLoop node visitor: Evaluates iteration range of for loop and then evaluates the body of the for loop for each iteration. Args: node (ast.ForInLoop): openQASM ForInLoop AST node Raises: Error: ErrorCode.UNHANDLED If the SET iterated over by the ForInLoop is incorrectly defined or not created using a RangeDefinition """ if not self.visit_loops: return name = node.identifier.name activation_record = ActivationRecord( name=f"for_loop_{self.call_stack.nesting_level+1}", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): start, end, step = self.visit(node.set_declaration) if end is None:
raise Error(
3
2023-11-16 17:37:29+00:00
16k
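The _maybe_annotated wrapper threaded through the interpreter above follows a generic decorator pattern: visit a statement's annotations first, then run the wrapped visitor method. A stripped-down sketch with stand-in node objects (illustrative only, not the shipyard API):

import functools


def maybe_annotated(method):
    """Visit a node's annotations (if any) before running the wrapped visitor method."""
    @functools.wraps(method)
    def annotated(self, node):
        for annotation in getattr(node, "annotations", []):
            self.visit(annotation)
        return method(self, node)
    return annotated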
quantuminterface/qiclib
src/qiclib/code/qi_dataflow.py
[ { "identifier": "ForRange", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class ForRange(QiContextManager):\n \"\"\"Adds ForRange to program.\n If multiple cells are used inside body, a synchronisation between the cells is done before the ForRange as well as after the end of the body.\n If ...
from abc import abstractmethod from enum import Enum from typing import Optional, List, Set, Tuple, Union, Dict from copy import copy from qiclib.code.qi_var_definitions import ( _QiVariableBase, QiExpression, ) from .qi_jobs import ( ForRange, If, Parallel, QiCell, QiCommand, QiContextManager, QiJob, )
11,223
): """Implementation of (a fairly naive) worklist algorithm which performs the dataflow analysis, with the given visitor.""" queue = list(cfg.nodes) cfg.add_value(name, initial) while len(queue) != 0: next = queue.pop(0) preds = list(predecessors(next)) if len(preds) != 0: input = preds[0].node.value_map[name] for pred in preds[1:]: input = input.merge(pred.node.value_map[name]) else: input = initial if next.type == _CFGNode.Type.COMMAND: output = next.command.accept(visitor, input, next) else: output = input original = next.value_map[name] if output != original: next.value_map[name] = output for succ in successors(next): queue.append(succ.node) class FlatLatticeValue(DataflowValue): """ FlatLatticeValue is a commonly used abstract value. * undefined: represents no value. * value: represents a single value. * no_const: represents all values. One should not use this constructor directly but instead use the :meth:`undefined` :meth:`no_const` and :meth:`value` class functions instead. """ class Type(Enum): UNDEFINED = 0 VALUE = 1 NO_CONST = 2 def __init__(self, type, value: QiExpression): self.type = type self.value = value @staticmethod def undefined(): return FlatLatticeValue(FlatLatticeValue.Type.UNDEFINED, None) @staticmethod def no_const(): return FlatLatticeValue(FlatLatticeValue.Type.NO_CONST, None) @staticmethod def value(value): return FlatLatticeValue(FlatLatticeValue.Type.VALUE, value) def merge(self, other): assert isinstance(other, FlatLatticeValue) if self.type == other.type and self.type == FlatLatticeValue.Type.VALUE: if self.value._equal_syntax(other.value): return self else: return FlatLatticeValue.no_const() else: if self.type == FlatLatticeValue.Type.UNDEFINED: return other elif other.type == FlatLatticeValue.Type.UNDEFINED: return self elif self.type == FlatLatticeValue.Type.NO_CONST: return self elif other.type == FlatLatticeValue.Type.NO_CONST: return other else: raise NotImplementedError(f"Merge of {other.type} not implemented") def __eq__(self, other): return self.type == other.type and ( self.value._equal_syntax(other.value) if self.type == FlatLatticeValue.Type.VALUE else True ) def __str__(self): if self.type == FlatLatticeValue.Type.VALUE: return f"{self.value}" else: return str(self.type) def __repr__(self): if self.type == FlatLatticeValue.Type.UNDEFINED: return "<undefined>" elif self.type == FlatLatticeValue.Type.VALUE: return f"<value: {self.value}>" elif self.type == FlatLatticeValue.Type.NO_CONST: return "<no_const>" else: raise NotImplementedError(f"__repr__ for type {self.type} not implemented") class CellValues(DataflowValue): """ DataflowValue which generalises FlatLatticeValue so every cell has its own FlatLatticeValue. """ def __init__(self, values=None): self.values = copy(values or {}) @classmethod
# Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This module provides basic infrastructure to perform dataflow analyses on qicode programs. Dataflow analyses are computed on the control flow graph (CFG) of a QiJob which should be created when necessary. The dataflow analysis itself is performed in using a standard worklist algorithm. The abstract domain is modeled using DataflowValue. Its merge function represents the supremum calculation. It is recommended to treat DataflowValues as immutable. """ class _CFGNode: class Type(Enum): START = 0 END = 1 COMMAND = 2 class SrcEdgeType(Enum): """CFG Edge information about the source node""" IF_TRUE = 0 IF_FALSE = 1 FOR_BODY = 2 FOR_END = 4 NORMAL = 5 def __str__(self): return { _CFGNode.SrcEdgeType.IF_TRUE: "if_true", _CFGNode.SrcEdgeType.IF_FALSE: "if_false", _CFGNode.SrcEdgeType.FOR_BODY: "for_true", _CFGNode.SrcEdgeType.FOR_END: "for_end", _CFGNode.SrcEdgeType.NORMAL: "normal", }[self] class DestEdgeType(Enum): """CFG Edge information about the destination node""" FOR_BODY_RETURN = 0 FOR_ENTRY = 1 NORMAL = 2 def __str__(self): return { _CFGNode.DestEdgeType.FOR_BODY_RETURN: "for_body_ret", _CFGNode.DestEdgeType.FOR_ENTRY: "for_entry", _CFGNode.DestEdgeType.NORMAL: "normal", }[self] class Neighbor: """Combination of node and both edge types. Each edge in the CFG is represented by an instance of this class""" def __init__( self, neighbor: "_CFGNode", src_edge_type: "_CFGNode.SrcEdgeType", dest_edge_type: Optional["_CFGNode.DestEdgeType"] = None, ): # Default argument didn't work for me in this case. if dest_edge_type is None: dest_edge_type = _CFGNode.DestEdgeType.NORMAL self.node = neighbor # Information about the edge for the src node # (for example, if this edge goes to the 'else' block of an 'if' statement.) self.src_edge_type = src_edge_type # Information about the edge for the destination node # (for example, if the edge loops back from the body of a for statement.) self.dest_edge_type = dest_edge_type _cfg_node_next_id = 1 def __init__( self, type: Union["_CFGNode.Type", QiCommand], instruction_list, index, *predecessors: "Tuple[_CFGNode, _CFGNode.SrcEdgeType]", ): if isinstance(type, QiCommand): self.type = _CFGNode.Type.COMMAND self.command = type else: assert isinstance(type, _CFGNode.Type) self.type = type # This field is used to associated arbitrary data with every node. # For example, a dataflow analysis might use this dictionary to # the nodes current abstract value. self.value_map: Dict[str, CellValues] = {} self.predecessors: Set[_CFGNode.Neighbor] = set() self.successors: Set[_CFGNode.Neighbor] = set() # Used to find commands in job command list, so we can insert new instruction before or after this # command. self.instruction_list = instruction_list self.instruction_index = index self.id = _CFGNode._cfg_node_next_id _CFGNode._cfg_node_next_id += 1 self.connect_predecessors(*predecessors) def connect_successors(self, *successors: "_CFGNode.Neighbor"): assert all(map(lambda x: isinstance(x, _CFGNode.Neighbor), successors)) for succ_neighbor in successors: succ = succ_neighbor.node pred_neighbor = copy(succ_neighbor) pred_neighbor.node = self self.successors.add(succ_neighbor) succ.predecessors.add(pred_neighbor) def connect_predecessors(self, *predecessors: "_CFGNode.Neighbor"): assert all(map(lambda x: isinstance(x, _CFGNode.Neighbor), predecessors)) for pred_neighbor in predecessors: pred = pred_neighbor.node succ_neighbor = copy(pred_neighbor) succ_neighbor.node = self self.predecessors.add(pred_neighbor) pred.successors.add(succ_neighbor) class _CFG: """Constructs a control flow graph (CFG) from the commands of a QiJob. The end node does not contain a command, if the last top level command is an If-else or ForRange """ def __init__(self, job: QiJob): self.nodes: Set[_CFGNode] = set() start, end = recursive_build_sub_cfg(job.commands, self.nodes) self.end = _CFGNode(_CFGNode.Type.END, None, None, *end) self.start = _CFGNode(_CFGNode.Type.START, None, None) self.start.connect_successors( _CFGNode.Neighbor(start, _CFGNode.SrcEdgeType.NORMAL) ) def node_iterator(self): visited = set() stack = [self.start] while len(stack) > 0: node = stack.pop() visited.add(node) yield node for successor in node.successors: successor = successor.node if successor not in visited: stack.append(successor) def add_value(self, key, initial): for node in self.node_iterator(): if key not in node.value_map: node.value_map[key] = initial def dump_dot_graph(self, path): """Dump the current cfg topology as a dot file for inspecting and debugging purposes.""" with open(path, "w", encoding="utf-8") as f: f.write("\ndigraph {\n") queue = [self.start] node_visited_or_in_queue = set() node_visited_or_in_queue.add(self.start) while len(queue) > 0: node = queue.pop(0) node_attributes = "\n".join( [f"{name} = {value}" for name, value in node.value_map.items()] ) if node.type == _CFGNode.Type.COMMAND: if isinstance(node.command, QiCommand): node_text = f"{node.command._stringify()}" else: node_text = f"{node.command}" label = f"{node_text}\n{node_attributes}" shape = "box" elif node.type == _CFGNode.Type.START: label = f"start\n{node_attributes}" shape = "oval" elif node.type == _CFGNode.Type.END: label = f"end\n{node_attributes}" shape = "oval" escaped_label = label.translate(str.maketrans({'"': '\\"'})) f.write(f'\t{node.id} [shape={shape}, label="{escaped_label}"];\n') for successor in node.successors: src_edge_type = successor.src_edge_type dest_edge_type = successor.dest_edge_type successor = successor.node assert isinstance(successor, _CFGNode) label = [] if src_edge_type is not _CFGNode.SrcEdgeType.NORMAL: label.append(f"{src_edge_type}") if dest_edge_type is not _CFGNode.DestEdgeType.NORMAL: label.append(f"{dest_edge_type}") label = ", ".join(label) node_label = f'[label="{label}"]' f.write(f"\t{node.id} -> {successor.id} {node_label};\n") if successor not in node_visited_or_in_queue: queue.append(successor) node_visited_or_in_queue.add(successor) f.write("}") def recursive_build_sub_cfg( commands: List[QiCommand], nodes ) -> Tuple[_CFGNode, List[_CFGNode.Neighbor]]: """ Constructs the nodes and edges for a CFG containing provided commands. `nodes` accumulates all nodes of the CFG. """ assert len(commands) > 0 prev: List[_CFGNode.Neighbor] = [] for idx, command in enumerate(commands, 0): if isinstance(command, If): node = _CFGNode(command, commands, idx, *prev) nodes.add(node) if len(command.body) > 0: body_start, body_end = recursive_build_sub_cfg(command.body, nodes) node.connect_successors( _CFGNode.Neighbor(body_start, _CFGNode.SrcEdgeType.IF_TRUE) ) prev = body_end else: prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.IF_TRUE)] if command.is_followed_by_else(): # len(command._else_body) > 0 else_start, else_end = recursive_build_sub_cfg( command._else_body, nodes ) node.connect_successors( _CFGNode.Neighbor(else_start, _CFGNode.SrcEdgeType.IF_FALSE) ) prev += else_end else: prev.append(_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.IF_FALSE)) elif isinstance(command, ForRange): for p in prev: p.dest_edge_type = _CFGNode.DestEdgeType.FOR_ENTRY node = _CFGNode(command, commands, idx, *prev) nodes.add(node) if len(command.body) > 0: body_start, body_end = recursive_build_sub_cfg(command.body, nodes) dest_edge_type = ( _CFGNode.DestEdgeType.FOR_ENTRY if isinstance(body_start.command, ForRange) else None ) node.connect_successors( _CFGNode.Neighbor( body_start, _CFGNode.SrcEdgeType.FOR_BODY, dest_edge_type ) ) for b in body_end: b.dest_edge_type = _CFGNode.DestEdgeType.FOR_BODY_RETURN node.connect_predecessors(*body_end) else: node.connect_predecessors( _CFGNode.Neighbor( node, _CFGNode.SrcEdgeType.FOR_BODY, _CFGNode.DestEdgeType.FOR_BODY_RETURN, ) ) prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.FOR_END)] elif isinstance(command, Parallel): # Parallel Blocks have somewhat tricky semantics and don't fit neatly into a CFG schema. # Therefore we just treat them as a single command and the respective analyses can deal with them # as they see fit. node = _CFGNode(command, commands, idx, *prev) nodes.add(node) prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.NORMAL)] else: assert not isinstance( command, QiContextManager ), "Context manager should probably be handled separately." node = _CFGNode(command, commands, idx, *prev) nodes.add(node) prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.NORMAL)] if idx == 0: start = node end = prev return start, end class DataflowValue: """ Interface for the abstract value used by dataflow analyses An implementation of DataflowValue should be a bounded lattice. """ @abstractmethod def merge(self, other: "DataflowValue") -> "DataflowValue": raise NotImplementedError( f"{self.__class__} doesn't implement merge function. This is a bug." ) class DataflowVisitor: """Visitor for dataflow analyses. The input (of type DataflowValue) is in the input field. The resulting output is returned by the respective visitor methods.""" def visit_cell_command(self, cell_cmd, input, node): return input def visit_context_manager(self, context_manager, input, node): return input def visit_if(self, if_cm, input, node): return input def visit_parallel(self, parallel_cm, input, node): return input def visit_for_range(self, for_range_cm, input, node): return input def visit_variable_command(self, variable_cmd, input, node): return input def visit_assign_command(self, assign_cmd, input, node): return input def visit_declare_command(self, declare_cmd, input, node): return input def visit_sync_command(self, sync_cmd, input, node): return input def visit_asm_command(self, asm_command, input, node): return input def forward_dataflow( cfg: _CFG, name, visitor: "DataflowVisitor", initial: DataflowValue, ): dataflow( cfg, name, visitor, initial, lambda x: x.predecessors, lambda x: x.successors, ) def reverse_dataflow( cfg: _CFG, name, visitor: "DataflowVisitor", initial: DataflowValue, ): dataflow( cfg, name, visitor, initial, lambda x: x.successors, lambda x: x.predecessors, ) def dataflow( cfg: _CFG, name, visitor: "DataflowVisitor", initial: DataflowValue, predecessors, successors, ): """Implementation of (a fairly naive) worklist algorithm which performs the dataflow analysis, with the given visitor.""" queue = list(cfg.nodes) cfg.add_value(name, initial) while len(queue) != 0: next = queue.pop(0) preds = list(predecessors(next)) if len(preds) != 0: input = preds[0].node.value_map[name] for pred in preds[1:]: input = input.merge(pred.node.value_map[name]) else: input = initial if next.type == _CFGNode.Type.COMMAND: output = next.command.accept(visitor, input, next) else: output = input original = next.value_map[name] if output != original: next.value_map[name] = output for succ in successors(next): queue.append(succ.node) class FlatLatticeValue(DataflowValue): """ FlatLatticeValue is a commonly used abstract value. * undefined: represents no value. * value: represents a single value. * no_const: represents all values. One should not use this constructor directly but instead use the :meth:`undefined` :meth:`no_const` and :meth:`value` class functions instead. """ class Type(Enum): UNDEFINED = 0 VALUE = 1 NO_CONST = 2 def __init__(self, type, value: QiExpression): self.type = type self.value = value @staticmethod def undefined(): return FlatLatticeValue(FlatLatticeValue.Type.UNDEFINED, None) @staticmethod def no_const(): return FlatLatticeValue(FlatLatticeValue.Type.NO_CONST, None) @staticmethod def value(value): return FlatLatticeValue(FlatLatticeValue.Type.VALUE, value) def merge(self, other): assert isinstance(other, FlatLatticeValue) if self.type == other.type and self.type == FlatLatticeValue.Type.VALUE: if self.value._equal_syntax(other.value): return self else: return FlatLatticeValue.no_const() else: if self.type == FlatLatticeValue.Type.UNDEFINED: return other elif other.type == FlatLatticeValue.Type.UNDEFINED: return self elif self.type == FlatLatticeValue.Type.NO_CONST: return self elif other.type == FlatLatticeValue.Type.NO_CONST: return other else: raise NotImplementedError(f"Merge of {other.type} not implemented") def __eq__(self, other): return self.type == other.type and ( self.value._equal_syntax(other.value) if self.type == FlatLatticeValue.Type.VALUE else True ) def __str__(self): if self.type == FlatLatticeValue.Type.VALUE: return f"{self.value}" else: return str(self.type) def __repr__(self): if self.type == FlatLatticeValue.Type.UNDEFINED: return "<undefined>" elif self.type == FlatLatticeValue.Type.VALUE: return f"<value: {self.value}>" elif self.type == FlatLatticeValue.Type.NO_CONST: return "<no_const>" else: raise NotImplementedError(f"__repr__ for type {self.type} not implemented") class CellValues(DataflowValue): """ DataflowValue which generalises FlatLatticeValue so every cell has its own FlatLatticeValue. """ def __init__(self, values=None): self.values = copy(values or {}) @classmethod
def default(cls, cells: List[QiCell], value: FlatLatticeValue):
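# ---- Editor's sketch (not part of the record above): a minimal, self-contained
# illustration of two pieces of the machinery defined there. The names `Flat`,
# `merge`, `preds`, `succs` and `transfer` are hypothetical stand-ins; the real
# code uses QiExpression, _CFGNode and DataflowVisitor instead.
from enum import Enum

class Flat(Enum):
    UNDEFINED = 0   # bottom of the flat lattice: nothing known yet
    NO_CONST = 2    # top of the flat lattice: conflicting values seen

def merge(a, b):
    # Join of two flat-lattice elements; concrete constants are plain objects,
    # mirroring FlatLatticeValue.merge above (constant-propagation style).
    if a is Flat.UNDEFINED:
        return b
    if b is Flat.UNDEFINED:
        return a
    if a is Flat.NO_CONST or b is Flat.NO_CONST:
        return Flat.NO_CONST
    return a if a == b else Flat.NO_CONST

assert merge(Flat.UNDEFINED, 3) == 3
assert merge(3, 3) == 3
assert merge(3, 4) is Flat.NO_CONST

# The same naive worklist fixpoint as `dataflow` above, on a two-node chain
# a -> b, where a produces the constant 42 and b passes its input through.
preds = {"a": [], "b": ["a"]}
succs = {"a": ["b"], "b": []}
value = {"a": Flat.UNDEFINED, "b": Flat.UNDEFINED}
transfer = {"a": lambda v: 42, "b": lambda v: v}
queue = ["a", "b"]
while queue:
    n = queue.pop(0)
    inp = Flat.UNDEFINED
    for p in preds[n]:
        inp = merge(inp, value[p])
    out = transfer[n](inp)
    if out != value[n]:
        value[n] = out
        queue.extend(succs[n])   # re-examine successors until a fixpoint
assert value["b"] == 42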
3
2023-11-10 10:26:10+00:00
16k
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependen...
import logging from typing import Annotated, Any, Optional from fastapi import ( APIRouter, Body, Depends, Header, HTTPException, Path, Request, status, ) from fastapi.security import OAuth2PasswordRequestForm from pydantic import EmailStr from redis.asyncio import Redis from starlette.datastructures import Address from app.api.deps import get_redis_dep from app.api.oauth2_validation import get_current_user, get_refresh_current_user from app.config.config import ( get_auth_settings, get_init_settings, get_settings, init_setting, ) from app.config.db.auth_settings import AuthSettings from app.config.init_settings import InitSettings from app.config.settings import Settings from app.core.security.password import verify_password from app.exceptions.exceptions import NotFoundException, ServiceException from app.models.sql.user import User as UserDB from app.schemas.external.msg import Msg from app.schemas.external.token import TokenResetPassword, TokenResponse from app.schemas.external.user import ( UserResponse, UserUpdate, UserUpdateResponse, ) from app.schemas.infrastructure.user import UserAuth from app.services.infrastructure.auth import common_auth_procedure from app.services.infrastructure.token import TokenService from app.services.infrastructure.user import UserService, get_user_service from app.tasks.email_tasks.email_tasks import ( send_password_changed_confirmation_email, send_reset_password_email, ) from app.utils.security.password import ( generate_password_reset_token, verify_password_reset_token, )
13,927
- `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. 
## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "someone@example.com"},
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." 
logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "someone@example.com"},
openapi_examples=init_setting.EMAIL_BODY_EXAMPLES,
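# ---- Editor's sketch: how a client might exercise the /auth/login endpoint
# defined in the record above. The `app` wiring is an assumption, not taken
# from the record; the boilerplate's real application factory may differ.
from fastapi.testclient import TestClient

def demo_login(app) -> None:
    client = TestClient(app)
    # OAuth2PasswordRequestForm expects form-encoded username/password fields.
    resp = client.post(
        "/auth/login",
        data={"username": "someone@example.com", "password": "secret"},
    )
    if resp.status_code == 200:
        tokens = resp.json()  # TokenResponse: access token, its type, refresh token
        print(tokens.get("access_token"))
    else:
        print(resp.status_code, resp.json().get("detail"))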
3
2023-11-17 00:32:32+00:00
16k
vitant-lang/CBAM-ASPP
train.py
[ { "identifier": "DeepLab", "path": "nets/deeplabv3_plus.py", "snippet": "class DeepLab(nn.Module):\n\tdef __init__(self, num_classes, backbone=\"mobilenet\", pretrained=True, downsample_factor=16):\n\t\tsuper(DeepLab, self).__init__()\n\t\tif backbone==\"xception\":\n\t\t\t#-----------------------------...
import os import datetime import numpy as np import torch import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim as optim from torch.utils.data import DataLoader from nets.deeplabv3_plus import DeepLab from nets.deeplabv3_training import (get_lr_scheduler, set_optimizer_lr, weights_init) from utils.callbacks import LossHistory, EvalCallback from utils.dataloader import DeeplabDataset, deeplab_dataset_collate from utils.utils import download_weights, show_config from utils.utils_fit import fit_one_epoch from torch.cuda.amp import GradScaler as GradScaler
12,537
Freeze_batch_size   = 8
#------------------------------------------------------------------#
#   Training parameters for the unfreezing stage
#   At this point the model backbone is no longer frozen, so the feature
#   extraction network will change
#   GPU memory usage is larger, and all parameters of the network change
#   UnFreeze_Epoch          total number of epochs the model trains for
#   Unfreeze_batch_size     batch size of the model after unfreezing
#------------------------------------------------------------------#
UnFreeze_Epoch      = 20
Unfreeze_batch_size = 4
#------------------------------------------------------------------#
#   Freeze_Train    whether to perform freeze training
#                   by default, train with the backbone frozen first,
#                   then unfreeze and continue.
#------------------------------------------------------------------#
Freeze_Train        = True

#------------------------------------------------------------------#
#   Other training parameters: learning rate, optimizer, lr decay
#------------------------------------------------------------------#
#------------------------------------------------------------------#
#   Init_lr         maximum learning rate of the model
#                   Init_lr=5e-4 is recommended with the Adam optimizer
#                   Init_lr=7e-3 is recommended with the SGD optimizer
#   Min_lr          minimum learning rate, defaults to 0.01 x the maximum
#------------------------------------------------------------------#
Init_lr             = 7e-4
Min_lr              = Init_lr * 0.01
#------------------------------------------------------------------#
#   optimizer_type  optimizer to use; the options are adam and sgd
#                   Init_lr=5e-4 is recommended with the Adam optimizer
#                   Init_lr=7e-3 is recommended with the SGD optimizer
#   momentum        momentum parameter used inside the optimizer
#   weight_decay    weight decay, which helps prevent overfitting
#                   adam handles weight_decay badly; set it to 0 with adam.
#------------------------------------------------------------------#
optimizer_type      = "sgd"
momentum            = 0.9
weight_decay        = 1e-4  # 1e-4 for sgd
#------------------------------------------------------------------#
#   lr_decay_type   learning-rate decay schedule; options are 'step' and 'cos'
#------------------------------------------------------------------#
lr_decay_type       = 'cos'
#------------------------------------------------------------------#
#   save_period     save the weights every this many epochs
#------------------------------------------------------------------#
save_period         = 800
#------------------------------------------------------------------#
#   save_dir        folder where weights and log files are saved
#------------------------------------------------------------------#
save_dir            = 'logs'
#------------------------------------------------------------------#
#   eval_flag       whether to evaluate during training (on the validation set)
#   eval_period     evaluate every this many epochs; frequent evaluation is not
#                   recommended, since evaluation is time-consuming and doing
#                   it often makes training very slow
#   The mAP obtained here will differ from the one from get_map.py, for two
#   reasons:
#   (1) the mAP obtained here is the validation-set mAP.
#   (2) the evaluation settings here are conservative, to speed evaluation up.
#------------------------------------------------------------------#
eval_flag           = True
eval_period         = 400  # started running 7.13, 10:40
#------------------------------------------------------------------#
#   VOCdevkit_path  dataset path
#------------------------------------------------------------------#
VOCdevkit_path      = 'VOCdevkit'
#------------------------------------------------------------------#
#   Suggested settings:
#   with few classes (a handful), set it to True
#   with many classes (a dozen or more) and a large batch_size (above 10),
#   set it to True
#   with many classes (a dozen or more) and a small batch_size (below 10),
#   set it to False
#------------------------------------------------------------------#
dice_loss           = False
#------------------------------------------------------------------#
#   Whether to use focal loss against positive/negative sample imbalance
#------------------------------------------------------------------#
focal_loss          = False
#------------------------------------------------------------------#
#   Whether to give different classes different loss weights; balanced by
#   default. If set, it must be a numpy array whose length equals num_classes.
#   For example:
#   num_classes = 3
#   cls_weights = np.array([1, 2, 3], np.float32)
#------------------------------------------------------------------#
cls_weights         = np.ones([num_classes], np.float32)
#------------------------------------------------------------------#
#   num_workers     whether to load data with multiple threads; 1 disables it
#                   enabling it speeds up data loading but uses more memory
#                   (in keras, multithreading sometimes ends up much slower)
#                   enable it only when IO is the bottleneck, i.e. when GPU
#                   compute is far faster than image loading.
#------------------------------------------------------------------#
num_workers         = 4

#------------------------------------------------------#
#   Select the GPU(s) to use
#------------------------------------------------------#
ngpus_per_node  = torch.cuda.device_count()
if distributed:
    dist.init_process_group(backend="nccl")
    local_rank  = int(os.environ["LOCAL_RANK"])
    rank        = int(os.environ["RANK"])
    device      = torch.device("cuda", local_rank)
    if local_rank == 0:
        print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...")
        print("Gpu Device Count : ", ngpus_per_node)
else:
    device      = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    local_rank  = 0

#----------------------------------------------------#
#   Download the pretrained weights
#----------------------------------------------------#
if pretrained:
    if distributed:
        if local_rank == 0:
            download_weights(backbone)
        dist.barrier()
    else:
        download_weights(backbone)
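# ---- Editor's sketch: the 'cos' learning-rate decay selected above
# (lr_decay_type = 'cos'), a cosine anneal from Init_lr down to Min_lr. This is
# an assumption-level illustration; the repo's get_lr_scheduler may add warmup
# or differ in detail.
import math

def cos_lr(epoch, total_epochs, init_lr=7e-4, min_lr=7e-6):
    t = min(max(epoch / max(total_epochs - 1, 1), 0.0), 1.0)
    return min_lr + 0.5 * (init_lr - min_lr) * (1.0 + math.cos(math.pi * t))

assert abs(cos_lr(0, 20) - 7e-4) < 1e-12   # starts at Init_lr
assert abs(cos_lr(19, 20) - 7e-6) < 1e-12  # ends at Min_lr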
'''
When training your own semantic segmentation model, pay attention to the following points:
1. Before training, check carefully that your data format meets the requirements.
   This library requires the dataset in VOC format; you need to prepare input
   images and labels.
   Input images are .jpg files of any size; they are resized automatically
   before being fed to training.
   Grayscale images are converted to RGB automatically for training; no manual
   change is needed.
   If the input images have a non-jpg extension, batch-convert them to jpg
   before starting training.

   Labels are .png images of any size; they are resized automatically before
   being fed to training.
   Many people use datasets downloaded from the internet whose label format
   does not match, so the labels need reprocessing. Be careful: the value of
   each pixel in a label is the class that pixel belongs to.
   Datasets commonly found online split the input images into two classes,
   with background pixels set to 0 and target pixels set to 255. Such a dataset
   trains without errors, but its predictions are useless!
   It must be changed so that background pixels are 0 and target pixels are 1.
   If the format is wrong, see: https://github.com/bubbliiiing/segmentation-format-fix

2. The loss value is used to judge convergence; what matters is the trend, i.e.
   that the validation loss keeps decreasing. If the validation loss barely
   changes, the model has essentially converged.
   The absolute magnitude of the loss has no particular meaning; large or small
   only reflects how the loss is computed, and it does not need to approach 0.
   If you want the loss to look nicer, you can divide by 10000 inside the
   corresponding loss function.
   Loss values during training are saved in the loss_%Y_%m_%d_%H_%M_%S folder
   under the logs folder.

3. Trained weight files are saved in the logs folder. Each epoch (Epoch)
   contains several training steps (Step), and each step performs one
   gradient-descent update.
   Weights are not saved after only a few Steps; be clear about the concepts
   of Epoch and Step.
'''
if __name__ == "__main__":
    #---------------------------------#
    #   Cuda    whether to use CUDA
    #           set to False if there is no GPU
    #---------------------------------#
    Cuda            = True
    #---------------------------------------------------------------------#
    #   distributed     whether to run single-machine multi-GPU distributed
    #                   training. The terminal commands only work on Ubuntu;
    #                   CUDA_VISIBLE_DEVICES selects the GPUs under Ubuntu.
    #                   On Windows, DP mode is used by default and calls all
    #                   GPUs; DDP is not supported.
    #   DP mode:
    #       set             distributed = False
    #       in a terminal:  CUDA_VISIBLE_DEVICES=0,1 python train.py
    #   DDP mode:
    #       set             distributed = True
    #       in a terminal:  CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py
    #---------------------------------------------------------------------#
    distributed     = False
    #---------------------------------------------------------------------#
    #   sync_bn     whether to use synchronized batch norm;
    #               available for multi-GPU DDP
    #---------------------------------------------------------------------#
    sync_bn         = False
    #---------------------------------------------------------------------#
    #   fp16        whether to train with mixed precision
    #               cuts GPU memory roughly in half; requires pytorch >= 1.7.1
    #---------------------------------------------------------------------#
    fp16            = False
    #-----------------------------------------------------#
    #   num_classes     must be changed when training your own dataset
    #                   number of classes you need + 1, e.g. 2 + 1
    #-----------------------------------------------------#
    num_classes = 3
    #---------------------------------#
    #   Backbone network to use:
    #   mobilenet
    #   xception
    #---------------------------------#
    backbone    = "mobilenet"
    #----------------------------------------------------------------------------------------------------------------------------#
    #   pretrained      whether to use the backbone's pretrained weights; these are the backbone's weights, loaded when the
    #                   model is built.
    #                   If model_path is set, the backbone weights need not be loaded and the value of pretrained is irrelevant.
    #                   If model_path is not set and pretrained = True, only the backbone is loaded before training starts.
    #                   If model_path is not set, pretrained = False and Freeze_Train = False, training starts from scratch
    #                   with no backbone-freezing stage.
    #----------------------------------------------------------------------------------------------------------------------------#
    pretrained  = False
    #----------------------------------------------------------------------------------------------------------------------------#
    #   See the README for downloading the weight files; they are available via a network drive. The model's pretrained weights
    #   are generic across datasets, because the features they capture are generic.
    #   The most important part of the pretrained weights is the backbone feature-extraction network, used for feature extraction.
    #   Pretrained weights are necessary in 99% of cases; without them the backbone weights are too random, feature extraction
    #   is ineffective, and the training results will be poor.
    #   Dimension-mismatch warnings when training on your own dataset are normal: the predictions differ, so the dimensions
    #   naturally do not match.
    #
    #   If training was interrupted, you can set model_path to a weight file under the logs folder to reload the partially
    #   trained weights.
    #   Also adjust the freeze-stage or unfreeze-stage parameters below to keep the model's epoch numbering continuous.
    #
    #   When model_path = '', the weights of the whole model are not loaded.
    #
    #   Here the whole model's weights are used, so they are loaded in train.py; pretrained does not affect the loading here.
    #   To train the model from the backbone's pretrained weights, set model_path = '' and pretrained = True; only the backbone
    #   is then loaded.
    #   To train the model from scratch, set model_path = '', pretrained = False and Freeze_Train = False; training then starts
    #   from scratch with no backbone-freezing stage.
    #
    #   In general, training from scratch performs poorly, because the weights are too random and feature extraction is
    #   ineffective; training from scratch is therefore very, very, very strongly discouraged!
    #   If you must start from scratch, consider the ImageNet dataset: first train a classification model to obtain backbone
    #   weights; the classification model's backbone is shared with this model, and training can then build on it.
    #----------------------------------------------------------------------------------------------------------------------------#
    model_path  = "model_data/deeplab_mobilenetv2.pth"
    #---------------------------------------------------------#
    #   downsample_factor   downsampling factor: 8 or 16
    #                       8 is a smaller factor and in theory works
    #                       better, but it also requires more GPU memory
    #---------------------------------------------------------#
    downsample_factor   = 8
    #------------------------------#
    #   Input image size
    #------------------------------#
    input_shape         = [512, 512]
    #----------------------------------------------------------------------------------------------------------------------------#
    #   Training has two stages: the freeze stage and the unfreeze stage. The freeze stage exists to keep training feasible on
    #   machines with limited resources.
    #   Freeze training needs less GPU memory; on a very weak GPU you can set Freeze_Epoch equal to UnFreeze_Epoch to run
    #   freeze training only.
    #
    #   Some suggested parameter settings follow; adjust them flexibly to your own needs:
    #   (1) Training from the whole model's pretrained weights:
    #       Adam:
    #           Init_Epoch = 0, Freeze_Epoch = 50, UnFreeze_Epoch = 100, Freeze_Train = True, optimizer_type = 'adam', Init_lr = 5e-4, weight_decay = 0. (frozen)
    #           Init_Epoch = 0, UnFreeze_Epoch = 100, Freeze_Train = False, optimizer_type = 'adam', Init_lr = 5e-4, weight_decay = 0. (not frozen)
    #       SGD:
    #           Init_Epoch = 0, Freeze_Epoch = 50, UnFreeze_Epoch = 100, Freeze_Train = True, optimizer_type = 'sgd', Init_lr = 7e-3, weight_decay = 1e-4. (frozen)
    #           Init_Epoch = 0, UnFreeze_Epoch = 100, Freeze_Train = False, optimizer_type = 'sgd', Init_lr = 7e-3, weight_decay = 1e-4. (not frozen)
    #       Here UnFreeze_Epoch can be adjusted between 100 and 300.
    #   (2) Training from the backbone's pretrained weights:
    #       Adam:
    #           Init_Epoch = 0, Freeze_Epoch = 50, UnFreeze_Epoch = 100, Freeze_Train = True, optimizer_type = 'adam', Init_lr = 5e-4, weight_decay = 0. (frozen)
    #           Init_Epoch = 0, UnFreeze_Epoch = 100, Freeze_Train = False, optimizer_type = 'adam', Init_lr = 5e-4, weight_decay = 0. (not frozen)
    #       SGD:
    #           Init_Epoch = 0, Freeze_Epoch = 50, UnFreeze_Epoch = 120, Freeze_Train = True, optimizer_type = 'sgd', Init_lr = 7e-3, weight_decay = 1e-4. (frozen)
    #           Init_Epoch = 0, UnFreeze_Epoch = 120, Freeze_Train = False, optimizer_type = 'sgd', Init_lr = 7e-3, weight_decay = 1e-4. (not frozen)
    #       Here, since training starts from the backbone's pretrained weights, those weights are not necessarily suited to
    #       semantic segmentation, and more training is needed to escape local optima.
    #       UnFreeze_Epoch can be adjusted between 120 and 300.
    #       Adam converges somewhat faster than SGD, so UnFreeze_Epoch can in theory be smaller, but more epochs are still
    #       recommended.
    #   (3) Setting batch_size:
    #       As large as the GPU can take. Running out of memory is unrelated to dataset size; on an out-of-memory error
    #       (OOM or "CUDA out of memory"), reduce batch_size.
    #       Because of the BatchNorm layers, the minimum batch_size is 2; it cannot be 1.
    #       Normally Freeze_batch_size should be 1-2x Unfreeze_batch_size. Do not make the gap too large, since it affects
    #       the automatic learning-rate adjustment.
    #----------------------------------------------------------------------------------------------------------------------------#
    #------------------------------------------------------------------#
    #   Training parameters for the freezing stage
    #   At this point the model backbone is frozen and the feature
    #   extraction network does not change
    #   GPU memory usage is smaller; the network is only fine-tuned
    #   Init_Epoch          the epoch training currently starts from; it may
    #                       exceed Freeze_Epoch, e.g. setting
    #                       Init_Epoch = 60, Freeze_Epoch = 50, UnFreeze_Epoch = 100
    #                       skips the freeze stage, starts directly from epoch
    #                       60, and adjusts the learning rate accordingly.
    #                       (used when resuming from a checkpoint)
    #   Freeze_Epoch        number of epochs of freeze training
    #                       (ignored when Freeze_Train=False)
    #   Freeze_batch_size   batch size during freeze training
    #                       (ignored when Freeze_Train=False)
    #------------------------------------------------------------------#
    Init_Epoch          = 0
    Freeze_Epoch        = 10
    Freeze_batch_size   = 8
    #------------------------------------------------------------------#
    #   Training parameters for the unfreezing stage
    #   At this point the model backbone is no longer frozen, so the
    #   feature extraction network will change
    #   GPU memory usage is larger, and all parameters of the network change
    #   UnFreeze_Epoch          total number of epochs the model trains for
    #   Unfreeze_batch_size     batch size of the model after unfreezing
    #------------------------------------------------------------------#
    UnFreeze_Epoch      = 20
    Unfreeze_batch_size = 4
    #------------------------------------------------------------------#
    #   Freeze_Train    whether to perform freeze training
    #                   by default, train with the backbone frozen first,
    #                   then unfreeze and continue.
    #------------------------------------------------------------------#
    Freeze_Train        = True

    #------------------------------------------------------------------#
    #   Other training parameters: learning rate, optimizer, lr decay
    #------------------------------------------------------------------#
    #------------------------------------------------------------------#
    #   Init_lr         maximum learning rate of the model
    #                   Init_lr=5e-4 is recommended with the Adam optimizer
    #                   Init_lr=7e-3 is recommended with the SGD optimizer
    #   Min_lr          minimum learning rate, defaults to 0.01 x the maximum
    #------------------------------------------------------------------#
    Init_lr             = 7e-4
    Min_lr              = Init_lr * 0.01
    #------------------------------------------------------------------#
    #   optimizer_type  optimizer to use; the options are adam and sgd
    #                   Init_lr=5e-4 is recommended with the Adam optimizer
    #                   Init_lr=7e-3 is recommended with the SGD optimizer
    #   momentum        momentum parameter used inside the optimizer
    #   weight_decay    weight decay, which helps prevent overfitting
    #                   adam handles weight_decay badly; set it to 0 with adam.
    #------------------------------------------------------------------#
    optimizer_type      = "sgd"
    momentum            = 0.9
    weight_decay        = 1e-4  # 1e-4 for sgd
    #------------------------------------------------------------------#
    #   lr_decay_type   learning-rate decay schedule; options are 'step' and 'cos'
    #------------------------------------------------------------------#
    lr_decay_type       = 'cos'
    #------------------------------------------------------------------#
    #   save_period     save the weights every this many epochs
    #------------------------------------------------------------------#
    save_period         = 800
    #------------------------------------------------------------------#
    #   save_dir        folder where weights and log files are saved
    #------------------------------------------------------------------#
    save_dir            = 'logs'
    #------------------------------------------------------------------#
    #   eval_flag       whether to evaluate during training (on the validation set)
    #   eval_period     evaluate every this many epochs; frequent evaluation is
    #                   not recommended, since evaluation is time-consuming and
    #                   doing it often makes training very slow
    #   The mAP obtained here will differ from the one from get_map.py, for two
    #   reasons:
    #   (1) the mAP obtained here is the validation-set mAP.
    #   (2) the evaluation settings here are conservative, to speed evaluation up.
    #------------------------------------------------------------------#
    eval_flag           = True
    eval_period         = 400  # started running 7.13, 10:40
    #------------------------------------------------------------------#
    #   VOCdevkit_path  dataset path
    #------------------------------------------------------------------#
    VOCdevkit_path      = 'VOCdevkit'
    #------------------------------------------------------------------#
    #   Suggested settings:
    #   with few classes (a handful), set it to True
    #   with many classes (a dozen or more) and a large batch_size (above 10),
    #   set it to True
    #   with many classes (a dozen or more) and a small batch_size (below 10),
    #   set it to False
    #------------------------------------------------------------------#
    dice_loss           = False
    #------------------------------------------------------------------#
    #   Whether to use focal loss against positive/negative sample imbalance
    #------------------------------------------------------------------#
    focal_loss          = False
    #------------------------------------------------------------------#
    #   Whether to give different classes different loss weights; balanced by
    #   default. If set, it must be a numpy array whose length equals num_classes.
    #   For example:
    #   num_classes = 3
    #   cls_weights = np.array([1, 2, 3], np.float32)
    #------------------------------------------------------------------#
    cls_weights         = np.ones([num_classes], np.float32)
    #------------------------------------------------------------------#
    #   num_workers     whether to load data with multiple threads; 1 disables it
    #                   enabling it speeds up data loading but uses more memory
    #                   (in keras, multithreading sometimes ends up much slower)
    #                   enable it only when IO is the bottleneck, i.e. when GPU
    #                   compute is far faster than image loading.
    #------------------------------------------------------------------#
    num_workers         = 4

    #------------------------------------------------------#
    #   Select the GPU(s) to use
    #------------------------------------------------------#
    ngpus_per_node  = torch.cuda.device_count()
    if distributed:
        dist.init_process_group(backend="nccl")
        local_rank  = int(os.environ["LOCAL_RANK"])
        rank        = int(os.environ["RANK"])
        device      = torch.device("cuda", local_rank)
        if local_rank == 0:
            print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...")
            print("Gpu Device Count : ", ngpus_per_node)
    else:
        device      = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        local_rank  = 0

    #----------------------------------------------------#
    #   Download the pretrained weights
    #----------------------------------------------------#
    if pretrained:
        if distributed:
            if local_rank == 0:
                download_weights(backbone)
            dist.barrier()
        else:
            download_weights(backbone)
model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained)
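# ---- Editor's sketch of the freeze -> unfreeze schedule that the comments
# above describe. `model.backbone`, `make_loader` and `fit_epoch` are
# illustrative stand-ins, not the repo's actual API.
def two_stage_train(model, make_loader, fit_epoch,
                    Init_Epoch=0, Freeze_Epoch=10, UnFreeze_Epoch=20,
                    Freeze_batch_size=8, Unfreeze_batch_size=4,
                    Freeze_Train=True):
    frozen = Freeze_Train and Init_Epoch < Freeze_Epoch
    for p in model.backbone.parameters():
        p.requires_grad = not frozen          # freeze the feature extractor
    loader = make_loader(Freeze_batch_size if frozen else Unfreeze_batch_size)
    for epoch in range(Init_Epoch, UnFreeze_Epoch):
        if frozen and epoch >= Freeze_Epoch:
            frozen = False                    # unfreeze and rebuild the loader
            for p in model.backbone.parameters():
                p.requires_grad = True
            loader = make_loader(Unfreeze_batch_size)
        fit_epoch(model, loader, epoch)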
0
2023-11-17 13:25:28+00:00
16k
fg320/DEASC
examples/12D_5x5_farm_dyn_tuning_wso_grouping_CI.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p...
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import GPWrap from deasc import TuningDyn_Grouping_CI from deasc.utils_floris import ( floris_extract_object_dict, floris_param_change_object_dict, floris_param_change_object )
12,695
""" This example shows wake steering optimisation on a 5x5 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping and column-independence is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables for each column are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 5, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(25), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [int(x) for x in np.linspace(1, 20, 20)] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # %% Dynamic tuning object based on a single farm column # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation tuning_turbines_cols_dict = {}
""" This example shows wake steering optimisation on a 5x5 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping and column-independence is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables for each column are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 5, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(25), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [int(x) for x in np.linspace(1, 20, 20)] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # %% Dynamic tuning object based on a single farm column # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation tuning_turbines_cols_dict = {}
tuning_dyn_obj = TuningDyn_Grouping_CI(param_class=parameter_class,
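# ---- Editor's sketch: a stand-in for what GPWrap.GP_so above produces -- a 2D
# regression from the two group yaw angles to the optimal wake-expansion
# parameter. scikit-learn is used here purely for illustration; DEASC's own
# wrapper (and its kernel and noise handling) differs.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

def fit_param_surrogate(yaw_data, param_data):
    X = np.asarray(yaw_data, dtype=float)             # shape (n, 2): group yaws
    y = np.asarray(param_data, dtype=float).ravel()   # optimal 'we' per sample
    gp = GaussianProcessRegressor(kernel=RBF() + WhiteKernel(),
                                  normalize_y=True)
    return gp.fit(X, y)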
3
2023-11-10 18:13:27+00:00
16k
OpenBMB/XAgent
command.py
[ { "identifier": "XAgentServerEnv", "path": "XAgentServer/application/core/envs.py", "snippet": "class XAgentServerEnv:\n \"\"\"\n XAgentServer environment variables\n if you change value of the environment variable, you need to restart \n the XAgentServer by running the following command:\n ...
import asyncio import json import os import threading import traceback import uuid import sys from contextlib import contextmanager from datetime import datetime from typing import List from colorama import Fore from apscheduler.schedulers.asyncio import AsyncIOScheduler from apscheduler.schedulers.blocking import BlockingScheduler from XAgentServer.application.core.envs import XAgentServerEnv from XAgentServer.database.connect import SessionLocal from XAgentServer.enums.status import StatusEnum from XAgentServer.exts.exception_ext import XAgentError from XAgentServer.interaction import XAgentInteraction from XAgentServer.loggers.logs import Logger from XAgentServer.models.interaction import InteractionBase from XAgentServer.models.parameter import InteractionParameter from XAgentServer.models.raw import XAgentRaw from XAgentServer.server import XAgentServer from XAgentServer.application.cruds.interaction import InteractionCRUD from XAgentServer.application.global_val import redis from command_input import CommandLineInput from XAgent.running_recorder import recorder
13,964
self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. """ user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id parameter = InteractionParameter( interaction_id=self.client_id, parameter_id=uuid.uuid4().hex, args={ "goal": goal, "plan": plan }, ) InteractionCRUD.add_parameter(db=db, parameter=parameter) def run(self): """ Runs the interaction with the XAgentServer with the provided arguments. """ # Create a new raw data to record with get_db() as db: InteractionCRUD.insert_raw(db=db, process=XAgentRaw( interaction_id=self.client_id, node_id=uuid.uuid4().hex, status=StatusEnum.RUNNING, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), current="", step=-1, data=None, file_list=[], do_interrupt=self.interrupt, wait_seconds=0, ask_for_human_help=False, is_human=True, human_data={"goal": self.args.task, "plan": self.args.plan}, human_file_list=self.args.upload_files, is_send=True, is_receive=False, is_deleted=False )) redis.set_key(f"{self.client_id}_send", 1) parameter = InteractionCRUD.get_init_parameter( db=db, interaction_id=self.client_id) self.task_handler(parameter=parameter) def task_handler(self, parameter: InteractionParameter): """ define a long task to run interaction Args: parameter (InteractionParameter): The parameter of interaction """ try: current_step = uuid.uuid4().hex with get_db() as db: base = InteractionCRUD.get_interaction(db=db, interaction_id=self.client_id) InteractionCRUD.update_interaction_status(db=db, interaction_id=base.interaction_id, status="running", message="running", current_step=current_step) # if mode is not auto, we will interrupt the interaction # and you can change the wait_seconds # default 10 min.
@contextmanager def get_db(): """ Provide a transactional scope around a series of operations. """ session = SessionLocal() try: yield session session.commit() except: session.rollback() raise finally: session.close() class CommandLineParam: """Command line parameters. Attributes: task: Task description. role: Role name (default is "Assistant"). plan: List of steps to perform (default is empty list). upload_files: List of files to upload (default is empty list). download_files: List of files to download (default is empty list). record_dir: Directory to store records (default is `None`). mode: Run mode. Can be "auto" (default is "auto"). max_wait_seconds: Maximum wait time in seconds (default is 600). description: Description of the interaction (default is "XAgent-Test"). agent: Agent name (default is "XAgent"). """ def __init__(self, task, role="Assistant", plan=[], upload_files: List[str] = [], download_files: List[str] = [], record_dir: str = None, mode: str = "auto", max_wait_seconds: int = 600, description: str = "XAgent-Test", agent: str = "XAgent", ): self.task = task self.plan = plan self.role = role self.upload_files = upload_files self.download_files = download_files self.record_dir = record_dir # auto is supported only in cmd self.mode = "auto" self.max_wait_seconds = max_wait_seconds self.description = description self.agent = agent class CommandLine(): """ A command-line interface for interacting with XAgentServer. Attributes: env: An instance of the XAgentServer environment. client_id: A unique identifier for the client, generated as a hexadecimal UUID. date_str: The current date as a string in YYYY-MM-DD format. log_dir: The directory where the logs are stored. logger: An instance of the Logger used for logging interactions. interactionDB: A database interface for interacting with either a persistent database (SQLite, MySQL, PostgreSQL) or a local storage file, depending on the configuration of `env`. """ def __init__(self, args: CommandLineParam = None): """ Initialize the CommandLine instance. Args: args (CommandLineParam) : parameters. task is required, mode options: ["auto"] """ self.args = args self.client_id = uuid.uuid4().hex self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger(log_dir=self.log_dir, log_file=f"interact.log") self.logger.typewriter_log( title=f"XAgentServer is running on cmd mode", title_color=Fore.RED) self.logger.info(title=f"XAgentServer log:", title_color=Fore.RED, message=f"{self.log_dir}") self.interrupt = self.args.mode != "auto" self.init_conv_env() self.max_wait_seconds = self.args.max_wait_seconds self.scheduler = AsyncIOScheduler() self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. 
""" user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id parameter = InteractionParameter( interaction_id=self.client_id, parameter_id=uuid.uuid4().hex, args={ "goal": goal, "plan": plan }, ) InteractionCRUD.add_parameter(db=db, parameter=parameter) def run(self): """ Runs the interaction with the XAgentServer with the provided arguments. """ # Create a new raw data to record with get_db() as db: InteractionCRUD.insert_raw(db=db, process=XAgentRaw( interaction_id=self.client_id, node_id=uuid.uuid4().hex, status=StatusEnum.RUNNING, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), current="", step=-1, data=None, file_list=[], do_interrupt=self.interrupt, wait_seconds=0, ask_for_human_help=False, is_human=True, human_data={"goal": self.args.task, "plan": self.args.plan}, human_file_list=self.args.upload_files, is_send=True, is_receive=False, is_deleted=False )) redis.set_key(f"{self.client_id}_send", 1) parameter = InteractionCRUD.get_init_parameter( db=db, interaction_id=self.client_id) self.task_handler(parameter=parameter) def task_handler(self, parameter: InteractionParameter): """ define a long task to run interaction Args: parameter (InteractionParameter): The parameter of interaction """ try: current_step = uuid.uuid4().hex with get_db() as db: base = InteractionCRUD.get_interaction(db=db, interaction_id=self.client_id) InteractionCRUD.update_interaction_status(db=db, interaction_id=base.interaction_id, status="running", message="running", current_step=current_step) # if mode is not auto, we will interrupt the interaction # and you can change the wait_seconds # default 10 min.
interaction = XAgentInteraction(
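# ---- Editor's sketch: a self-contained illustration of the transactional
# `get_db` pattern defined above -- commit on success, rollback on any
# exception, and always close the session. `_StubSession` replaces
# SessionLocal so the sketch runs stand-alone.
from contextlib import contextmanager

class _StubSession:
    def commit(self): print("commit")
    def rollback(self): print("rollback")
    def close(self): print("close")

@contextmanager
def stub_get_db():
    session = _StubSession()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

with stub_get_db() as db:
    pass  # do work with db; on normal exit this prints "commit" then "close"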
4
2023-10-16 03:44:57+00:00
16k
deepseek-ai/DreamCraft3D
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12,321
else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
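# ---- Editor's sketch: a small numeric check of the closed forms the DDPM
# class below implements. q_sample draws x_t in one shot as
#     x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps,
# and register_schedule (with v_posterior = 0) uses the posterior variance
#     beta_tilde_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t.
import numpy as np

rng = np.random.default_rng(0)
T = 10
betas = np.linspace(1e-4, 2e-2, T)   # toy "linear" beta schedule
alphas = 1.0 - betas
abar = np.cumprod(alphas)

x0, eps, t = rng.normal(size=3), rng.normal(size=3), 7
x_t = np.sqrt(abar[t]) * x0 + np.sqrt(1.0 - abar[t]) * eps

abar_prev = np.append(1.0, abar[:-1])
beta_tilde = (1.0 - abar_prev) / (1.0 - abar) * betas
assert beta_tilde[0] == 0.0          # variance is 0 at the start of the chain
assert np.all(beta_tilde[1:] > 0.0)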
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
16
2023-10-23 07:40:20+00:00
16k
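The DDPM record above threads every q_sample / q_posterior computation through an extract_into_tensor helper that gathers one schedule coefficient per batch timestep and broadcasts it over the image dimensions. A minimal sketch of that broadcast-gather pattern, assuming a toy 1-D beta schedule and illustrative shapes (only the helper's behavior is taken from the record; the surrounding values are hypothetical):

import torch

def extract_into_tensor(a: torch.Tensor, t: torch.Tensor, x_shape: torch.Size) -> torch.Tensor:
    # Pick a[t_i] for each batch element i, then reshape to (b, 1, 1, ...)
    # so the coefficient broadcasts against an image-shaped tensor.
    b = t.shape[0]
    return a.gather(-1, t).reshape(b, *((1,) * (len(x_shape) - 1)))

# Toy forward-diffusion step q(x_t | x_0), mirroring q_sample in the record above.
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x0 = torch.randn(4, 3, 8, 8)              # hypothetical clean batch
t = torch.randint(0, 1000, (4,))          # one timestep index per sample
noise = torch.randn_like(x0)
x_t = (extract_into_tensor(alphas_cumprod.sqrt(), t, x0.shape) * x0
       + extract_into_tensor((1.0 - alphas_cumprod).sqrt(), t, x0.shape) * noise)
print(x_t.shape)  # torch.Size([4, 3, 8, 8])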
zju3dv/4K4D
easyvolcap/utils/gl_utils.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary tha...
from typing import TYPE_CHECKING from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL from torch import nn from enum import Enum, auto from os.path import join, dirname from typing import Dict, Union, List from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized from easyvolcap.utils.console_utils import * from easyvolcap.utils.base_utils import dotdict from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.color_utils import cm_cpu_store from easyvolcap.utils.depth_utils import depth_curve_fn from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params from easyvolcap.utils.net_utils import typed, multi_gather, create_meshgrid, volume_rendering, raw2alpha, torch_dtype_to_numpy_dtype, load_pretrained, get_bounds from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR from OpenGL.GL import shaders from pytorch3d.structures import Pointclouds, Meshes from cuda import cudart from easyvolcap.engine.registry import call_from_cfg from easyvolcap.utils.gaussian_utils import GaussianModel import os import glm import torch import ctypes import numpy as np import sys import OpenGL.GL as gl
12,167
# Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif 
filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering batch = to_cuda(camera.to_batch()) rgb, acc, dpt = self.gaussian_model.render(batch) if self.render_depth:
from __future__ import annotations if TYPE_CHECKING: # fmt: off # Environment variable messaging # Need to export EGL_DEVICE_ID before trying to import egl # And we need to consider the case when we're performing distributed training # from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS if 'easyvolcap.engine' in sys.modules and (sys.modules['easyvolcap.engine'].args.type != 'gui' or sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type == 'UnitySocketViewer'): # FIXME: GLOBAL VARIABLES try: except Exception as e: log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}')) os.environ['PYOPENGL_PLATFORM'] = '' try: except Exception as e: print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:') print(f'pip install git+https://github.com/mcfletch/pyopengl') raise e # fmt: on def linearize_depth(d, n: float, f: float): # 0-1 -> -1,1 # ndc -> view return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n)) def common_opengl_options(): # Use program point size gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) # Performs face culling gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) # Performs alpha trans testing gl.glEnable(gl.GL_ALPHA_TEST) # Performs z-buffer testing gl.glEnable(gl.GL_DEPTH_TEST) # gl.glDepthMask(gl.GL_TRUE) gl.glDepthFunc(gl.GL_LEQUAL) # gl.glDepthRange(-1.0, 1.0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) # Enable some masking tests gl.glEnable(gl.GL_SCISSOR_TEST) # Enable this to correctly render points # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310 gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory. # # The second argument specifies that our pixels will be in bytes. 
# gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) def load_shader_source(file: str = 'splat.frag'): # Ideally we can just specify the shader name instead of an variable if not exists(file): file = f'{dirname(__file__)}/shaders/{file}' if not exists(file): file = file.replace('shaders/', '') if not exists(file): raise RuntimeError(f'Shader file: {file} does not exist') with open(file, 'r') as f: return f.read() def use_gl_program(program: Union[shaders.ShaderProgram, dict]): if isinstance(program, dict): # Recompile the program if the user supplied sources program = dotdict(program) program = shaders.compileProgram( shaders.compileShader(program.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER), shaders.compileShader(program.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER) ) return gl.glUseProgram(program) class Mesh: class RenderType(Enum): POINTS = 1 LINES = 2 TRIS = 3 QUADS = 4 # TODO: Support quad loading STRIPS = 5 # Helper class to render a mesh on opengl # This implementation should only be used for debug visualization # Since no differentiable mechanism will be added # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly def __init__(self, verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict(), render_type: RenderType = RenderType.TRIS, # Misc info name: str = 'mesh', filename: str = '', visible: bool = True, # Render options shade_flat: bool = False, # smooth shading point_radius: float = 0.015, render_normal: bool = False, # Storage options store_device: str = 'cpu', compute_device: str = 'cuda', vert_sizes=[3, 3, 3], # pos + color + norm # Init options est_normal_thresh: int = 100000, # Ignore unused input **kwargs, ) -> None: super().__init__() self.name = name self.visible = visible self.render_type = render_type self.shade_flat = shade_flat self.point_radius = point_radius self.render_normal = render_normal self.store_device = store_device self.compute_device = compute_device self.vert_sizes = vert_sizes self.est_normal_thresh = est_normal_thresh # Uniform and program self.compile_shaders() self.uniforms = dotdict() # uniform values # Before initialization self.max_verts = 0 self.max_faces = 0 # OpenGL data if filename: self.load_from_file(filename) else: self.load_from_data(verts, faces, colors, normals, scalars) def compile_shaders(self): try: self.mesh_program = shaders.compileProgram( shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER) ) self.point_program = shaders.compileProgram( shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e @property def n_verts_bytes(self): return len(self.verts) * self.vert_size * self.verts.element_size() @property def n_faces_bytes(self): return len(self.faces) * self.face_size * self.faces.element_size() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts @property def faces_data(self): # a heavy copy 
operation faces = self.faces.ravel().numpy() # N, 3 faces = np.asarray(faces, dtype=np.uint32, order='C') return faces @property def face_size(self): return self.render_type.value @property def vert_size(self): return sum(self.vert_sizes) def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'): verts, faces, colors, normals, scalars = self.load_data_from_file(filename) self.load_from_data(verts, faces, colors, normals, scalars) def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'): self.name = os.path.split(filename)[-1] verts, faces, colors, normals, scalars = None, None, None, None, None verts, faces = load_mesh(filename, device=self.store_device) if not len(faces): verts, colors, normals, scalars = load_pts(filename) self.render_type = Mesh.RenderType.POINTS else: self.render_type = Mesh.RenderType(faces.shape[-1]) # use value return verts, faces, colors, normals, scalars def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()): # Data type conversion verts = torch.as_tensor(verts) # convert to tensor if input is of other types if verts.dtype == torch.float32: pass # supports this for now elif verts.dtype == torch.float16: pass # supports this for now else: verts = verts.type(torch.float) # convert to float32 if input is of higher precision gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT self.vert_gl_types = [gl_dtype] * len(self.vert_sizes) # Prepare main mesh data: vertices and faces self.verts = torch.as_tensor(verts, device=self.store_device) self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support # Prepare colors and normals if colors is not None: self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype) else: bounds = get_bounds(self.verts[None])[0] self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0]) if normals is not None: self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype) else: self.estimate_vertex_normals() # Prepare other scalars if scalars is not None: for k, v in scalars.items(): setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok? 
# Prepare OpenGL related buffer self.update_gl_buffers() def estimate_vertex_normals(self): def est_pcd_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: pcd = Pointclouds([self.verts]).to(self.compute_device) self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim def est_tri_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: mesh = Meshes([self.verts], [self.faces]).to(self.compute_device) self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim if not len(self.verts) > self.est_normal_thresh: if self.render_type == Mesh.RenderType.TRIS: est_tri_norms() elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms() else: # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping')) self.normals = self.verts else: # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation')) self.normals = self.verts def offscreen_render(self, eglctx: "eglContextManager", camera: Camera): eglctx.resize(camera.W, camera.H) self.render(camera) def render(self, camera: Camera): if not self.visible: return # For point rendering if self.render_type == Mesh.RenderType.POINTS: gl.glUseProgram(self.point_program) self.use_gl_program(self.point_program) else: gl.glUseProgram(self.mesh_program) self.use_gl_program(self.mesh_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) if self.render_type == Mesh.RenderType.POINTS: gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices elif self.render_type == Mesh.RenderType.LINES: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.TRIS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.QUADS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.STRIPS: gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) else: raise NotImplementedError gl.glBindVertexArray(0) def use_gl_program(self, program: shaders.ShaderProgram): use_gl_program(program) self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat") self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius") self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal") self.uniforms.H = gl.glGetUniformLocation(program, "H") self.uniforms.W = gl.glGetUniformLocation(program, "W") self.uniforms.n = gl.glGetUniformLocation(program, "n") self.uniforms.f = gl.glGetUniformLocation(program, "f") self.uniforms.P = gl.glGetUniformLocation(program, "P") self.uniforms.K = gl.glGetUniformLocation(program, "K") self.uniforms.V = gl.glGetUniformLocation(program, "V") self.uniforms.M = gl.glGetUniformLocation(program, "M") def upload_gl_uniforms(self, camera: Camera): K = camera.gl_ixt # hold the reference V = camera.gl_ext # hold the reference M = glm.identity(mat4) P = K * V * M gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat) gl.glUniform1f(self.uniforms.point_radius, self.point_radius) gl.glUniform1i(self.uniforms.render_normal, 
self.render_normal) gl.glUniform1i(self.uniforms.H, camera.H) # o2w gl.glUniform1i(self.uniforms.W, camera.W) # o2w gl.glUniform1f(self.uniforms.n, camera.n) # o2w gl.glUniform1f(self.uniforms.f, camera.f) # o2w gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w def update_gl_buffers(self): # Might be overwritten self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0, len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated if hasattr(self, 'verts'): gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference if hasattr(self, 'faces'): gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data) def resize_buffers(self, v: int = 0, f: int = 0): if v > self.max_verts or f > self.max_faces: if v > self.max_verts: self.max_verts = v if f > self.max_faces: self.max_faces = f self.init_gl_buffers(v, f) def init_gl_buffers(self, v: int = 0, f: int = 0): # This will only init the corresponding buffer object n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes # Housekeeping if hasattr(self, 'vao'): gl.glDeleteVertexArrays(1, [self.vao]) gl.glDeleteBuffers(2, [self.vbo, self.ebo]) self.vao = gl.glGenVertexArrays(1) self.vbo = gl.glGenBuffers(1) self.ebo = gl.glGenBuffers(1) gl.glBindVertexArray(self.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao cumsum = 0 for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)): gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float gl.glEnableVertexAttribArray(i) cumsum += s if n_faces_bytes > 0: # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) gl.glBindVertexArray(0) def render_imgui(self): pass class Quad(Mesh): # A shared texture for CUDA (pytorch) and OpenGL # Could be rendererd to screen using blitting or just drawing a quad def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip self.use_cudagl = use_cudagl self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings _type self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.update_gl_buffers() self.compile_shaders() self.max_H, self.max_W = H, W self.H, self.W = H, W self.compose = compose self.compose_power = compose_power self.init_texture() @property def n_faces_bytes(self): return 0 def use_gl_program(self, 
program: shaders.ShaderProgram): super().use_gl_program(program) self.uniforms.tex = gl.glGetUniformLocation(program, 'tex') gl.glUseProgram(self.quad_program) # use a different program gl.glUniform1i(self.uniforms.tex, 0) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_texture() def init_texture(self): if hasattr(self, 'cu_tex'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex)) if hasattr(self, 'fbo'): gl.glDeleteFramebuffers(1, [self.fbo]) gl.glDeleteTextures(1, [self.tex]) # Init the texture to be blit onto the screen self.tex = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Init the framebuffer object if explicit blitting is used (slower than drawing quad) self.fbo = gl.glGenFramebuffers(1) old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo) if self.use_cudagl: if self.compose: # Both reading and writing of this resource is required flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone else: flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags)) def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0): assert self.use_cudagl, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad" w = w or self.W h = h or self.H if image.shape[-1] == 3: image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0)) if self.compose: """ Blit current framebuffer to this texture (self.tex) Read content of this texture into a cuda buffer Perform alpha blending based on the frame's alpha channel Copy the blended image back into the texture (self.tex) """ old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0 gl.glBlitFramebuffer(x, y, w, h, x, y, w, h, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old) buffer = torch.empty_like(image) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst w * 4 * buffer.element_size(), # dpitch cu_tex_arr, # 
src x * 4 * image.element_size(), # wOffset y, # hOffset w * 4 * buffer.element_size(), # width: width of matrix transfer (columns in bytes) h, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]]) alpha = image[..., -1:] / 255 image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int image[..., -1:] = buffer[..., -1:] + image[..., -1:] image = image.clip(0, 255) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr, x * 4 * image.element_size(), y, image.data_ptr(), w * 4 * image.element_size(), # differently sized w * 4 * image.element_size(), # rgba, should do a composition first h, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) def upload_to_texture(self, ptr: np.ndarray): H, W = ptr.shape[:2] H, W = min(self.H, H), min(self.W, W) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down? @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def render(self, camera: Camera = None): self.draw() # no uploading needed def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ Upload the texture instead of the camera This respects the OpenGL convention of lower left corners """ w = w or self.W h = h or self.H _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(x, y, w, h) gl.glScissor(x, y, w, h) # only render in this small region of the viewport gl.glUseProgram(self.quad_program) # use a different program gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) gl.glBindVertexArray(0) # Some housekeeping gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ This respects the OpenGL convention of lower left corners """ w = w or self.W h = h or self.H old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0 gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped x, y, x + w, y + h, # the height is flipped gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old) class UQuad(Mesh): """ Responsible for initializing textures with a single value or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing) Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte """ def __init__(self): self.n_blit_values = 3 self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.compile_shaders() self.uniforms = dotdict() # uniform values self.use_gl_programs(self.quad_program) self.update_gl_buffers() @property def n_faces_bytes(self): return 0 @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: 
Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def use_gl_programs(self, program: shaders.ShaderProgram): for i in range(self.n_blit_values): self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}') for i in range(self.n_blit_values): self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}') gl.glUseProgram(program) # use a different program for i in range(self.n_blit_values): self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}') gl.glUniform1i(self.uniforms[f'tex{i}'], i) def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]): for i, v in enumerate(values): v = vec4(v) # HACK: Hold the reference for this upload gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v)) # as float array for i, v in enumerate(use_texs): gl.glUniform1i(self.uniforms[f'use_tex{i}'], v) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): """ This function will render 'value' to the currently bound framebuffer, up to six outputs """ old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) gl.glUseProgram(self.quad_program) self.upload_gl_uniforms(values, use_texs) # should be a noop # Prepare to render to textures gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) # number of vertices gl.glBindVertexArray(old_vao) gl.glUseProgram(old_prog) class DQuad(UQuad): def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): old_function = gl.glGetIntegerv(gl.GL_DEPTH_FUNC) gl.glDepthFunc(gl.GL_ALWAYS) super().draw(values, use_texs) gl.glDepthFunc(old_function) def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F): # Prepare for write frame buffers color_buffer = gl.glGenTextures(1) depth_upper = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, store reference in fbo # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0)) # 16 * 4 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0) # location 0 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0) # location 2 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, store reference in fbo # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering batch = to_cuda(camera.to_batch()) rgb, 
acc, dpt = self.gaussian_model.render(batch) if self.render_depth:
rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1) # H, W, 4
3
2023-10-17 04:48:46+00:00
16k
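The copy_to_texture path in the row above mixes CUDA-OpenGL resource mapping with a manual over-composite; the blend itself is ordinary tensor arithmetic. A minimal PyTorch sketch of just that blend, assuming H*W*4 uint8 RGBA tensors (the compose_over name and signature are illustrative, not part of the source):

import torch

def compose_over(dst: torch.Tensor, src: torch.Tensor, compose_power: float = 1.0) -> torch.Tensor:
    # dst: previously rendered RGBA frame (H, W, 4); src: new frame drawn on top
    dst = dst.float()
    src = src.float()
    alpha = src[..., -1:] / 255  # opacity of the new frame
    out = torch.empty_like(src)
    # same arithmetic as the interop path: dst shows through where src is transparent
    out[..., :3] = dst[..., :3] * (1 - alpha ** compose_power) + src[..., :3] * alpha
    out[..., 3:] = dst[..., 3:] + src[..., 3:]  # accumulate coverage
    return out.clamp(0, 255).to(torch.uint8)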
0xbitches/sd-webui-lcm
scripts/main.py
[ { "identifier": "LCMScheduler", "path": "lcm/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n This model ...
from concurrent.futures import ThreadPoolExecutor from pathlib import Path from typing import Optional from lcm.lcm_scheduler import LCMScheduler from lcm.lcm_pipeline import LatentConsistencyModelPipeline from lcm.lcm_i2i_pipeline import LatentConsistencyModelImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from modules import script_callbacks from PIL import Image, PngImagePlugin import uuid import modules.scripts as scripts import modules.shared import os import random import time import numpy as np import gradio as gr import torch import cv2
11,113
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
scheduler = LCMScheduler.from_pretrained(
0
2023-10-22 11:53:48+00:00
16k
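save_image in the row above persists the generation parameters inside the PNG itself as text chunks, so seeds and settings travel with the file. A minimal Pillow sketch of that pattern, assuming a metadata dict that contains a seed key (the save_with_metadata name and path layout are illustrative):

import uuid
from PIL import Image, PngImagePlugin

def save_with_metadata(img: Image.Image, metadata: dict, save_dir: str = ".") -> str:
    # Embed every generation parameter as a PNG tEXt chunk
    info = PngImagePlugin.PngInfo()
    for key, value in metadata.items():
        info.add_text(key, str(value))
    path = f"{save_dir}/{uuid.uuid4()}-{metadata.get('seed', 0)}.png"
    img.save(path, pnginfo=info)
    return path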
kylesargent/ZeroNVS
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,932
isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
2
2023-10-24 19:02:44+00:00
16k
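isosurface() in the row above rescales marching-tetrahedra vertices from the helper's canonical points_range into isosurface_bbox via scale_tensor. A plausible pure-PyTorch sketch of that mapping, assuming a (2, 3) bbox of per-axis (min, max); the scale_points name and signature are assumptions, not the threestudio implementation:

import torch

def scale_points(x: torch.Tensor, src_range: tuple, bbox: torch.Tensor) -> torch.Tensor:
    # x: (..., 3) grid vertices; src_range: canonical (lo, hi) of the tet grid
    # bbox: (2, 3) tensor of per-axis (min, max) to scale into
    lo, hi = src_range
    t = (x - lo) / (hi - lo)                  # normalize to [0, 1]
    return bbox[0] + t * (bbox[1] - bbox[0])  # lerp into the target box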
princeton-nlp/LLM-Shearing
llmshearing/models/composer_pythia.py
[ { "identifier": "L0Module", "path": "llmshearing/models/l0_module.py", "snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.b...
import math import torch import torch.nn as nn from typing import List, Optional, Tuple from einops import rearrange from omegaconf import DictConfig from torch.nn import functional as F from transformers.pytorch_utils import (find_pruneable_heads_and_indices, prune_linear_layer) from llmshearing.models.l0_module import L0Module from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
10,801
def param_init_fn(self, module): pass def fsdp_wrap_fn(self, module): return isinstance(module, PythiaBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module): return isinstance(module, PythiaBlock) class PythiaBlock(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() layernorm_class = CoFiLayerNorm # TODO: CoFiLayerNorm,RMSLayerNorm self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.attn = PythiaAttention(cfg, device) self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.mlp = PythiaMLP(cfg, device) self.use_parallel_residual = cfg.get('use_parallel_residual', False) # TODO: add to config def prune_params(self, zs_block): self.attn.prune_params(zs_block) self.mlp.prune_params(zs_block) if self.attn.query_key_value is None: self.ln_1 = None if self.mlp.up_proj is None: self.ln_2 = None if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"] if self.ln_1 is not None: self.ln_1.prune_params(hidden_z) if self.ln_2 is not None: self.ln_2.prune_params(hidden_z) def forward( self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None, is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False, head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # 
self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits)
class ComposerMosaicPythia(ComposerMosaicLlama): def __init__(self, cfg): super().__init__(cfg) self.model = PythiaModel(cfg) class CoFiLayerNorm(torch.nn.LayerNorm): def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None: super().__init__(normalized_shape, eps, elementwise_affine, device) def forward(self, input, hidden_z=None): if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] compressed_input = torch.index_select( input, dim=-1, index=remaining_index) compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] normalized_shape = len(remaining_index) normed_input = F.layer_norm( compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps) output = input.clone() normed_input = normed_input.to(output.dtype) output[..., remaining_index] = normed_input else: output = F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps) return output def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index]) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index)) self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index)) self.normalized_shape = (len(remaining_index),) class PythiaEmbedding(nn.Embedding): def forward(self, input, hidden_z=None): embeddings = super().forward(input) if hidden_z is not None: embeddings = embeddings.mul(hidden_z) return embeddings def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] self.weight.data = self.weight.data.mul(hidden_z) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone()) self.embedding_dim = len(remaining_index) print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}") class PythiaModel(nn.Module): def __init__(self, cfg: DictConfig): super().__init__() print(f'Tried to build Pythia model with cfg.name={cfg.name}') self.cfg = cfg ### added ### self.l0_module = None if getattr(self.cfg, "l0_module", None) is not None: self.l0_module = L0Module(self.cfg, device=cfg.init_device) ############# layernorm_class = CoFiLayerNorm self.attn_impl = cfg.attn_impl self.embedding_fraction = cfg.get('embedding_fraction', 1) assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!' 
self.transformer = nn.ModuleDict({ "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device), }) self.transformer.update({ 'blocks': nn.ModuleList([ PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers) ]) }) self.transformer.update({ "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False), }) self.transformer.update({ "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config }) self.is_causal = True if cfg.get('verbose') and cfg.get('verbose') > 2: print(self) def prune_params(self, zs=None): # TODO if zs is None: self.l0_module.eval() zs = self.l0_module(calculate_lagrangian=False) # wte as well :) # ln_f if hidden states are to be pruned if "hidden_z" in zs: hidden_z = zs["hidden_z"] remaining_index = torch.where(~hidden_z.eq(0))[0] self.transformer.ln_f.prune_params(hidden_z) self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z) self.transformer.wte.weight = torch.nn.parameter.Parameter( self.transformer.wte.weight.index_select(1, remaining_index).clone()) self.transformer.wte.embedding_dim = len(remaining_index) # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z) half = self.transformer.output.weight.data.dtype == torch.float16 self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1) if half: self.transformer.output = self.transformer.output.half() for i, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, i) block.prune_params(zs_block) def get_zs_block(self, zs, block_idx): zs_block = {} if zs is not None: for key in zs: if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"] else: zs_block[key] = zs[key][block_idx] return zs_block def forward( self, input_ids: torch.LongTensor, key_padding_mask: Optional[torch.ByteTensor] = None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, pruned_steps: int = 0, retain_grad: bool = False, **zs,): S = input_ids.size(1) assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!" 
tok_emb = self.transformer.wte(input_ids) if "hidden_z" in zs: tok_emb = tok_emb.mul(zs["hidden_z"]) x = tok_emb attn_bias = None # only consider the flash attention case attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb) l0_output = None if self.l0_module is not None: assert zs == {}, "zs should be empty when using L0Module" zs = self.l0_module(calculate_lagrangian=False, pruned_steps=pruned_steps) for b_idx, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, b_idx) past_key_value = past_key_values[ b_idx] if past_key_values is not None else None x, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=self.is_causal, attention_mask=attention_mask, retain_grad=retain_grad, **zs_block ) if past_key_values is not None: past_key_values[b_idx] = past_key_value x = self.transformer.ln_f(x, hidden_z=zs.get("hidden_z", None)) logits = self.transformer.output(x) if self.l0_module is not None: l0_output = self.l0_module(calculate_lagrangian=True, pruned_steps=pruned_steps) return {"logits": logits, "l0_output": l0_output, "zs": zs} def param_init_fn(self, module): pass def fsdp_wrap_fn(self, module): return isinstance(module, PythiaBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module): return isinstance(module, PythiaBlock) class PythiaBlock(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() layernorm_class = CoFiLayerNorm # TODO: CoFiLayerNorm,RMSLayerNorm self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.attn = PythiaAttention(cfg, device) self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.mlp = PythiaMLP(cfg, device) self.use_parallel_residual = cfg.get('use_parallel_residual', False) # TODO: add to config def prune_params(self, zs_block): self.attn.prune_params(zs_block) self.mlp.prune_params(zs_block) if self.attn.query_key_value is None: self.ln_1 = None if self.mlp.up_proj is None: self.ln_2 = None if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"] if self.ln_1 is not None: self.ln_1.prune_params(hidden_z) if self.ln_2 is not None: self.ln_2.prune_params(hidden_z) def forward( self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None, is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False, head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, 
hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits)
self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn
6
2023-10-16 12:26:08+00:00
16k
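CoFiLayerNorm in the row above normalizes only the hidden dimensions whose mask entry is non-zero and scatters the result back at full width, which lets pruning masks flow through the normalization layer. A minimal standalone sketch of that masked forward, assuming a 1-D hidden_z mask (the masked_layer_norm name is illustrative):

import torch
import torch.nn.functional as F

def masked_layer_norm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor,
                      hidden_z: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # Normalize only the kept hidden dimensions, then write them back in place
    keep = torch.where(~hidden_z.eq(0))[0]
    sub = x.index_select(-1, keep)
    normed = F.layer_norm(sub, [keep.numel()], weight[keep], bias[keep], eps)
    out = x.clone()
    out[..., keep] = normed.to(out.dtype)
    return out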
hkchengrex/Cutie
cutie/inference/inference_core.py
[ { "identifier": "MemoryManager", "path": "cutie/inference/memory_manager.py", "snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n ...
from typing import List, Optional, Iterable, Dict from omegaconf import DictConfig from cutie.inference.memory_manager import MemoryManager from cutie.inference.object_manager import ObjectManager from cutie.inference.image_feature_store import ImageFeatureStore from cutie.model.cutie import CUTIE from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate import logging import numpy as np import torch import torch.nn.functional as F
11,758
force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1 image, self.pad = pad_divide_by(image, 16) image = image.unsqueeze(0) # add the batch dimension if self.flip_aug: image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0) # whether to update the working memory is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) # segment when there is no input mask or when the input mask is incomplete need_segment = (mask is None) or (self.object_manager.num_obj > 0 and not self.object_manager.has_all(objects)) update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end) # encoding the image ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image) key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image) # segmentation from memory if needed if need_segment: pred_prob_with_bg = self._segment(key, selection, pix_feat, ms_feat, update_sensory=update_sensory) # use the input mask if provided if mask is not None: # inform the manager of the new objects, and get a list of temporary id # temporary ids -- indicates the position of objects in the tensor # (starts with 1 due to the background channel) corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects) mask, _ = pad_divide_by(mask, 16) if need_segment: # merge predicted mask with the incomplete input mask pred_prob_no_bg = pred_prob_with_bg[1:] # use the mutual exclusivity of segmentation if idx_mask: pred_prob_no_bg[:, mask > 0] = 0 else: pred_prob_no_bg[:, mask.max(0) > 0.5] = 0 new_masks = [] for mask_id, tmp_id in 
enumerate(corresponding_tmp_ids): if idx_mask: this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg) else: this_mask = mask[tmp_id] if tmp_id > pred_prob_no_bg.shape[0]: new_masks.append(this_mask.unsqueeze(0)) else: # +1 for padding the background channel pred_prob_no_bg[tmp_id - 1] = this_mask # new_masks are always in the order of tmp_id mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0) elif idx_mask: # simply convert cls to one-hot representation if len(objects) == 0: if delete_buffer: self.image_feature_store.delete(self.curr_ti) log.warn('Trying to insert an empty mask as memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) mask = torch.stack( [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)], dim=0)
log = logging.getLogger() class InferenceCore: def __init__(self, network: CUTIE, cfg: DictConfig, *, image_feature_store: ImageFeatureStore = None): self.network = network self.cfg = cfg self.mem_every = cfg.mem_every stagger_updates = cfg.stagger_updates self.chunk_size = cfg.chunk_size self.save_aux = cfg.save_aux self.max_internal_size = cfg.max_internal_size self.flip_aug = cfg.flip_aug self.curr_ti = -1 self.last_mem_ti = 0 # at which time indices should we update the sensory memory if stagger_updates >= self.mem_every: self.stagger_ti = set(range(1, self.mem_every + 1)) else: self.stagger_ti = set( np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int)) self.object_manager = ObjectManager() self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager) if image_feature_store is None: self.image_feature_store = ImageFeatureStore(self.network) else: self.image_feature_store = image_feature_store self.last_mask = None def clear_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager) def clear_non_permanent_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_non_permanent_memory() def clear_sensory_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_sensory_memory() def update_config(self, cfg): self.mem_every = cfg['mem_every'] self.memory.update_config(cfg) def _add_memory(self, image: torch.Tensor, pix_feat: torch.Tensor, prob: torch.Tensor, key: torch.Tensor, shrinkage: torch.Tensor, selection: torch.Tensor, *, is_deep_update: bool = True, force_permanent: bool = False) -> None: """ Memorize the given segmentation in all memory stores. The batch dimension is 1 if flip augmentation is not used. image: RGB image, (1/2)*3*H*W pix_feat: from the key encoder, (1/2)*_*H*W prob: (1/2)*num_objects*H*W, in [0, 1] key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W selection can be None if not using long-term memory is_deep_update: whether to use deep update (e.g. with the mask encoder) force_permanent: whether to force the memory to be permanent """ if prob.shape[1] == 0: # nothing to add log.warn('Trying to add an empty object mask to memory!') return if force_permanent: as_permanent = 'all' else: as_permanent = 'first' self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids) msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask( image, pix_feat, self.memory.get_sensory(self.object_manager.all_obj_ids), prob, deep_update=is_deep_update, chunk_size=self.chunk_size, need_weights=self.save_aux) self.memory.add_memory(key, shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. 
key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. 
end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1 image, self.pad = pad_divide_by(image, 16) image = image.unsqueeze(0) # add the batch dimension if self.flip_aug: image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0) # whether to update the working memory is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) # segment when there is no input mask or when the input mask is incomplete need_segment = (mask is None) or (self.object_manager.num_obj > 0 and not self.object_manager.has_all(objects)) update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end) # encoding the image ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image) key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image) # segmentation from memory if needed if need_segment: pred_prob_with_bg = self._segment(key, selection, pix_feat, ms_feat, update_sensory=update_sensory) # use the input mask if provided if mask is not None: # inform the manager of the new objects, and get a list of temporary id # temporary ids -- indicates the position of objects in the tensor # (starts with 1 due to the background channel) corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects) mask, _ = pad_divide_by(mask, 16) if need_segment: # merge predicted mask with the incomplete input mask pred_prob_no_bg = pred_prob_with_bg[1:] # use the mutual exclusivity of segmentation if idx_mask: pred_prob_no_bg[:, mask > 0] = 0 else: pred_prob_no_bg[:, mask.max(0) > 0.5] = 0 new_masks = [] for mask_id, tmp_id in enumerate(corresponding_tmp_ids): if idx_mask: this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg) else: this_mask = mask[tmp_id] if tmp_id > pred_prob_no_bg.shape[0]: new_masks.append(this_mask.unsqueeze(0)) else: # +1 for padding the background channel pred_prob_no_bg[tmp_id - 1] = this_mask # new_masks are always in the order of tmp_id mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0) elif idx_mask: # simply convert cls to one-hot representation if len(objects) == 0: if delete_buffer: self.image_feature_store.delete(self.curr_ti) log.warn('Trying to insert an empty mask as memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) mask = torch.stack( [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)], dim=0)
pred_prob_with_bg = aggregate(mask, dim=0)
6
2023-10-19 17:49:24+00:00
16k
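The InferenceCore constructor in the row above spreads sensory-memory updates evenly over the mem_every window instead of doing them all at once. A small sketch of that schedule, with an example of the offsets it produces (the stagger_schedule name is illustrative):

import numpy as np

def stagger_schedule(mem_every: int, stagger_updates: int) -> set:
    # Evenly spaced offsets after the last memory frame at which to update sensory memory
    if stagger_updates >= mem_every:
        return set(range(1, mem_every + 1))
    return set(np.round(np.linspace(1, mem_every, stagger_updates)).astype(int))

# e.g. stagger_schedule(10, 3) -> {1, 6, 10}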
stanford-oval/WikiChat
chat_interactive.py
[ { "identifier": "DialogueTurn", "path": "pipelines/dialog_turn.py", "snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = Non...
import logging import argparse import json import readline # enables keyboard arrows when typing in the terminal from typing import List from pygments import highlight from pygments.formatters.terminal256 import Terminal256Formatter from pygments.lexers.web import JsonLexer from pipelines.dialog_turn import DialogueTurn from pipelines.chatbot import Chatbot from pipelines.utils import input_user, print_chatbot, make_parent_directories from pipelines.pipeline_arguments import ( add_pipeline_arguments, check_pipeline_arguments, ) from llm.llm_generate import write_prompt_logs_to_file from llm.global_variables import set_debug_mode
11,642
""" Chat with the chatbot via command line """ logging.getLogger("openai").setLevel(logging.ERROR) logger = logging.getLogger(__name__) def main(args): chatbot = Chatbot(args) dlg_history: List[DialogueTurn] = [] while True: try: user_utterance = input_user() except EOFError: # stop the chatbot break # check for special commands if user_utterance in args.quit_commands: # stop the chatbot break if user_utterance in ["clear", "cls"]: # restart the dialog dlg_history = [] continue new_dlg_turn = chatbot.generate_next_turn( dlg_history, user_utterance, pipeline=args.pipeline ) dlg_history.append(new_dlg_turn) turn_log = json.dumps(new_dlg_turn.log(), indent=2, ensure_ascii=False) colorful_turn_log = highlight( turn_log, lexer=JsonLexer(), formatter=Terminal256Formatter(style="bw"), ) logger.info("Turn log: %s", colorful_turn_log) print_chatbot("Chatbot: " + new_dlg_turn.agent_utterance) make_parent_directories(args.output_file) with open(args.output_file, "a") as outfile: if len(dlg_history) == 1: # first turn outfile.write("=====\n") outfile.write("User: " + new_dlg_turn.user_utterance + "\n") outfile.write("Chatbot: " + new_dlg_turn.agent_utterance + "\n") with open(args.output_file.strip("txt") + "log", "a") as outfile: outfile.write(turn_log) outfile.write("\n") if __name__ == "__main__": # text generation arguments parser = argparse.ArgumentParser()
""" Chat with the chatbot via command line """ logging.getLogger("openai").setLevel(logging.ERROR) logger = logging.getLogger(__name__) def main(args): chatbot = Chatbot(args) dlg_history: List[DialogueTurn] = [] while True: try: user_utterance = input_user() except EOFError: # stop the chatbot break # check for special commands if user_utterance in args.quit_commands: # stop the chatbot break if user_utterance in ["clear", "cls"]: # restart the dialog dlg_history = [] continue new_dlg_turn = chatbot.generate_next_turn( dlg_history, user_utterance, pipeline=args.pipeline ) dlg_history.append(new_dlg_turn) turn_log = json.dumps(new_dlg_turn.log(), indent=2, ensure_ascii=False) colorful_turn_log = highlight( turn_log, lexer=JsonLexer(), formatter=Terminal256Formatter(style="bw"), ) logger.info("Turn log: %s", colorful_turn_log) print_chatbot("Chatbot: " + new_dlg_turn.agent_utterance) make_parent_directories(args.output_file) with open(args.output_file, "a") as outfile: if len(dlg_history) == 1: # first turn outfile.write("=====\n") outfile.write("User: " + new_dlg_turn.user_utterance + "\n") outfile.write("Chatbot: " + new_dlg_turn.agent_utterance + "\n") with open(args.output_file.strip("txt") + "log", "a") as outfile: outfile.write(turn_log) outfile.write("\n") if __name__ == "__main__": # text generation arguments parser = argparse.ArgumentParser()
add_pipeline_arguments(parser)
5
2023-10-19 18:17:25+00:00
16k
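The tail of the script only constructs the parser before handing it to add_pipeline_arguments (the record's next_line). For orientation, a hedged sketch of the handful of flags main() itself reads; the flag names and defaults here are assumptions for illustration, not the repository's definitive interface:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--pipeline", type=str, required=True)             # read as args.pipeline
parser.add_argument("--output_file", type=str, default="dialog.txt")   # read as args.output_file
parser.add_argument(
    "--quit_commands",
    nargs="+",
    default=["quit", "exit"],
    help="user inputs that end the chat loop",                          # read as args.quit_commands
)
args = parser.parse_args()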
jhejna/cpl
research/algs/off_policy_algorithm.py
[ { "identifier": "ReplayBuffer", "path": "research/datasets/replay_buffer/buffer.py", "snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functi...
import datetime import functools import os import sys import tempfile import gym import numpy as np import torch from abc import abstractmethod from typing import Any, Dict, Optional, Union from research.datasets import ReplayBuffer from research.datasets.replay_buffer import storage from research.envs.base import EmptyEnv from research.networks.base import ModuleContainer from research.utils import runners, utils from .base import Algorithm from research.utils.config import Config
11,302
            self.dataset.add(obs=self._current_obs)  # add the first observation.
            self.env_step = self._env_step
        else:
            raise ValueError("Invalid env passed")

    def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:
        return dict()

    def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:
        # Return if env is Empty or we aren't at every env_freq steps
        if step <= self.offline_steps:
            # Purposefully set to +/-inf so we still write the CSV log.
            return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep)

        if step < self.random_steps:
            action = env.action_space.sample()
        else:
            self.eval()
            action = self._get_train_action(self._current_obs, step, total_steps)
            self.train()
        if isinstance(env.action_space, gym.spaces.Box):
            action = np.clip(action, env.action_space.low, env.action_space.high)

        next_obs, reward, done, info = env.step(action)
        self._env_steps += 1
        self._episode_length += 1
        self._episode_reward += reward

        if "discount" in info:
            discount = info["discount"]
        elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps:
            discount = 1.0
        else:
            discount = 1 - float(done)

        # Store the consequences.
        self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount)

        if done:
            self._num_ep += 1
            # Compute metrics
            metrics = dict(
                steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep
            )
            # Reset the environment
            self._current_obs = env.reset()
            self.dataset.add(obs=self._current_obs)  # Add the first timestep
            self._episode_length = 0
            self._episode_reward = 0
            return metrics
        else:
            self._current_obs = next_obs
            return dict(steps=self._env_steps)

    def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:
        # Receive data from the last step and add to buffer. Should only call recv!
        if self._resetting:
            self._current_obs = env.reset_recv()
            self._num_ep += 1
            self._episode_length = 0
            self._episode_reward = 0
            self.dataset.add(obs=self._current_obs)
            self._resetting = False
            done = False
        else:
            self._current_obs, reward, done, info = env.step_recv()
            self._env_steps += 1
            self._episode_length += 1
            self._episode_reward += reward
            self.dataset.add(
                obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"]
            )

        # Send data for the next step and return metrics. Should only call send!
        if done:
            # If the episode terminated, then we need to reset and send the reset message
            self._resetting = True
            env.reset_send()
            return dict(
                steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep
            )
        else:
            # Otherwise, compute the action we should take and send it.
            self._resetting = False
            if step < self.random_steps:
                self._current_action = env.action_space.sample()
            else:
                self.eval()
                self._current_action = self._get_train_action(self._current_obs, step, total_steps)
                self.train()
            if isinstance(env.action_space, gym.spaces.Box):
                self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high)
            env.step_send(self._current_action)
            return dict(steps=self._env_steps)

    def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:
        # All we do is check the pipe to see if there is data!
        metrics = env()
        if len(metrics) > 0:
            # If the metrics are non-empty, then it means that we have completed an episode.
            # As such, increment the counter.
            self._eps_since_last_checkpoint += 1
            if self._eps_since_last_checkpoint == self.async_runner_ep_lag:
                self.save(self._checkpoint_dir, str(step), dict(step=step))
                self._eps_since_last_checkpoint = 0
        return metrics

    @abstractmethod
    def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray:
        raise NotImplementedError

    @functools.cached_property
    def action_range(self):
        action_range = (self.processor.action_space.low, self.processor.action_space.high)
        return utils.to_device(utils.to_tensor(action_range), self.device)

    def _predict(
        self, batch: Dict, sample: bool = False, noise: float = 0.0, noise_clip: Optional[float] = None, temperature=1.0
    ) -> torch.Tensor:
        with torch.no_grad():
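_env_step above encodes a common off-policy bookkeeping rule: a transition's discount is 1 - done, except when the environment reports an explicit discount or the episode ended only because gym's TimeLimit ran out, in which case bootstrapping should continue. The rule in isolation (a sketch, not the repository's API):

from typing import Optional

def terminal_discount(done: bool, episode_length: int, info: dict,
                      max_episode_steps: Optional[int]) -> float:
    # Prefer an explicit discount from the environment, if it reports one.
    if "discount" in info:
        return float(info["discount"])
    # Time-limit truncation is not a true terminal: keep bootstrapping.
    if max_episode_steps is not None and episode_length == max_episode_steps:
        return 1.0
    # A genuine terminal state cuts the return.
    return 1.0 - float(done)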
class OffPolicyAlgorithm(Algorithm): def __init__( self, *args, offline_steps: int = 0, # Run fully offline by setting to -1 random_steps: int = 1000, async_runner_ep_lag: int = 1, **kwargs, ): super().__init__(*args, **kwargs) self.offline_steps = offline_steps self.random_steps = random_steps self.async_runner_ep_lag = async_runner_ep_lag def setup_datasets(self, env: gym.Env, total_steps: int): super().setup_datasets(env, total_steps) # Assign the correct update function based on what is passed in. if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: self.env_step = self._empty_step elif isinstance(env, runners.AsyncEnv): self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 self._resetting = True env.reset_send() # Ask the env to start resetting. self.env_step = self._async_env_step elif isinstance(env, runners.MPRunner): assert isinstance(self.dataset, ReplayBuffer), "must use replaybuffer for MP RUnner." assert self.dataset.distributed, "ReplayBuffer must be distributed for use with Fully MPRunner." # Launch the runner subprocess. self._eps_since_last_checkpoint = 0 self._checkpoint_dir = tempfile.mkdtemp(prefix="checkpoints_") assert self.offline_steps <= 0, "MPRunner does not currently support offline to online." env.start( fn=_off_policy_collector_subprocess, checkpoint_path=self._checkpoint_dir, storage_path=self.dataset.storage_path, random_steps=self.random_steps, exclude_keys=self.dataset.exclude_keys, total_steps=total_steps, ) self.env_step = self._runner_env_step elif isinstance(env, gym.Env): # Setup Env Metrics self._current_obs = env.reset() self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. 
self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. # As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high) return utils.to_device(utils.to_tensor(action_range), self.device) def _predict( self, batch: Dict, sample: bool = False, noise: float = 0.0, noise_clip: Optional[float] = None, temperature=1.0 ) -> torch.Tensor: with torch.no_grad():
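_async_env_step is built around a strict alternation: every call performs exactly one receive (the result of the previous send) at the top and exactly one send (step_send or reset_send) at the bottom, so the learner never blocks on the environment process. A toy driver showing that contract, assuming env follows the runners.AsyncEnv interface referenced above and env_step has been bound as in setup_datasets:

def collect(algorithm, env, total_steps):
    env.reset_send()  # mirrors the initial send issued in setup_datasets
    for step in range(total_steps):
        # each call does one *_recv at the top and one *_send at the bottom
        metrics = algorithm.env_step(env, step, total_steps)
        # ... interleave gradient updates here while the env steps ...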
if isinstance(self.network, ModuleContainer) and "encoder" in self.network.CONTAINERS:
3
2023-10-19 17:25:45+00:00
16k
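All of the collection paths in this record share one exploration recipe: uniformly random actions for the first random_steps environment steps, the learned policy afterwards, and a clip to the Box bounds either way. As a minimal free-standing sketch:

import numpy as np
import gym

def select_action(policy_fn, env: gym.Env, step: int, random_steps: int):
    if step < random_steps:
        action = env.action_space.sample()  # warmup: uniform exploration
    else:
        action = policy_fn()                # learned policy
    if isinstance(env.action_space, gym.spaces.Box):
        action = np.clip(action, env.action_space.low, env.action_space.high)
    return action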
bcmi/libcom
libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.ema import LitEma from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.models.diffusion.ddim import DDIMSampler
12,446
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback: callback(i)
            if img_callback: img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t',
                        total=timesteps) if verbose else reversed(range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback: callback(i)
            if img_callback: img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond, shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
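The inpainting update img = img_orig * mask + (1. - mask) * img that both sampling loops apply re-imposes the known region at every timestep: where mask == 1 the sampler's pixels are overwritten with a freshly noised copy of x0 at the current t, and only the mask == 0 region keeps the model's prediction. Shown standalone:

import torch

def reimpose_known_region(img: torch.Tensor, x0_noised_at_t: torch.Tensor,
                          mask: torch.Tensor) -> torch.Tensor:
    # mask == 1: keep the (noised) ground truth; mask == 0: keep the sample.
    return x0_noised_at_t * mask + (1.0 - mask) * img

# inside the loop: img = reimpose_known_region(img, self.q_sample(x0, ts), mask)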
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=512, channels=3, log_every_t=100, clip_denoised=True, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) # count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3): linear_start = 0.00085 linear_end = 0.012 # if exists(given_betas): # betas = given_betas # else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] # if len(x.shape) == 
3: # x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') # x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = 
kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3): linear_start = 0.00085 linear_end = 0.012 super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif 
isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) 
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out def decode_first_stage_training(self, z, predict_cids=False, force_not_quantize=False): # print('decoding...') # # def print_message(grad): # print('backward decoding') # # z.register_hook(print_message) if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c, **kwargs) return loss def get_time_with_schedule(self, scheduler, bs): if scheduler == 'linear': t = torch.randint(0, self.num_timesteps, (bs,), device=self.device).long() elif scheduler == 'cosine': t = torch.rand((bs, ), device=self.device) t = torch.cos(torch.pi / 2. 
* t) * self.num_timesteps t = t.long() elif scheduler == 'cubic': t = torch.rand((bs,), device=self.device) t = (1 - t ** 3) * self.num_timesteps t = t.long() else: raise NotImplementedError t = torch.clamp(t, min=0, max=self.num_timesteps-1) return t def forward(self, x,mask, c, *args, **kwargs): if 't' not in kwargs: t = torch.randint(0, self.num_timesteps, (x.shape[0], ), device=self.device).long() else: t = kwargs.pop('t') return self.p_losses(x,mask, c, t, *args, **kwargs) def apply_model(self, x_noisy, mask,t, cond, return_ids=False, **kwargs): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, mask,t, **cond, **kwargs) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, mask,cond, t, noise=None, **kwargs): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy,mask, t, cond, **kwargs) loss_dict = {} prefix = 'train' if self.training else 'val' # ops if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) pred_x0 = self.predict_start_from_noise(x_t=x_noisy, t=t, noise=model_output) return loss, loss_dict,pred_x0 def p_losses_origin(self, x_start, cond, t, noise=None, **kwargs): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond, **kwargs) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif 
self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, 
x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
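register_schedule above amounts to precomputing every beta/alpha-derived tensor once and storing each with register_buffer so it moves with the module across devices. The core arithmetic, standalone, with the same hard-coded endpoints (0.00085 to 0.012); I assume make_beta_schedule follows the latent-diffusion convention in which the "linear" schedule is linear in sqrt(beta):

import numpy as np

def linear_beta_schedule(timesteps=1000, linear_start=0.00085, linear_end=0.012):
    # linear in sqrt(beta), then squared, per the latent-diffusion convention
    betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2
    alphas = 1.0 - betas
    alphas_cumprod = np.cumprod(alphas, axis=0)
    alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
    return betas, alphas_cumprod, alphas_cumprod_prev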
ddim_sampler = DDIMSampler(self)
16
2023-10-19 05:08:12+00:00
16k
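get_v and predict_start_from_z_and_v in this record implement the v-parameterization identities v = sqrt(a_t) * eps - sqrt(1 - a_t) * x0 and x0 = sqrt(a_t) * x_t - sqrt(1 - a_t) * v, where a_t is alphas_cumprod[t]. A tiny self-contained numeric check of the round trip:

import torch

torch.manual_seed(0)
x0, eps = torch.randn(4), torch.randn(4)
a = torch.rand(())            # plays the role of sqrt(alphas_cumprod[t])
b = (1 - a ** 2).sqrt()       # sqrt(1 - alphas_cumprod[t])
x_t = a * x0 + b * eps        # forward process, as in q_sample
v = a * eps - b * x0          # get_v
x0_rec = a * x_t - b * v      # predict_start_from_z_and_v
assert torch.allclose(x0_rec, x0, atol=1e-5)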
e4s2023/E4S2023
optimization.py
[ { "identifier": "CelebAHQDataset", "path": "datasets/dataset.py", "snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=T...
import numpy as np import torchvision.transforms as transforms import os import json import sys import pprint import torch import random import torch.nn.functional as F import torch.nn as nn import glob from torch.utils.data import DataLoader from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED, FFHQ_MASK_CONVERT_TF_DETAILED from models.networks import Net3 from options.optim_options import OptimOptions from criteria.id_loss import IDLoss from criteria.lpips.lpips import LPIPS from criteria.style_loss import StyleLoss from criteria.face_parsing.face_parsing_loss import FaceParsingLoss from functools import partial from utils import torch_utils from tqdm import tqdm from torchvision.utils import make_grid from PIL import Image from utils.alignment import crop_faces, calc_alignment_coefficients from utils.morphology import dilation from swap_face_fine.face_parsing.face_parsing_demo import init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps
13,793
sys.path.append(".") sys.path.append("..") toPIL = transforms.ToPILImage() celelbAHQ_label_list = ['background','skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'] # supported_swap_comps= ['background','skin', 'nose', 'eye', # 'brow','ear', 'mouth','hair', # 'hat','ear_r','neck', 'cloth'] # 9个属性 faceParser_label_list = ['background', 'mouth', 'eyebrows', 'eyes', 'hair', 'nose', 'skin', 'ears', 'belowface'] def paste_image(coeffs, img, orig_image): pasted_image = orig_image.copy().convert('RGBA') projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, coeffs, Image.BILINEAR) pasted_image.paste(projected, (0, 0), mask=projected) return pasted_image def save_image(image, output_folder, image_name, image_index, ext='jpg'): if ext == 'jpeg' or ext == 'jpg': image = image.convert('RGB') folder = os.path.join(output_folder, image_name) os.makedirs(folder, exist_ok=True) image.save(os.path.join(folder, "%04d.%s"%(image_index,ext))) class Optimizer: def __init__(self, opts, net=None): self.opts = opts if net is None: """ self.test_ds = CelebAHQDataset(dataset_root=self.opts.dataset_root, mode="test", img_transform=transforms.Compose( [TO_TENSOR, NORMALIZE]), label_transform=transforms.Compose( [ MASK_CONVERT_TF_DETAILED,TO_TENSOR]), # MASK_CONVERT_TF, fraction=self.opts.ds_frac) print(f"Number of test samples: {len(self.test_ds)}") """ # self.test_dataloader = DataLoader(self.test_ds, batch_size=self.opts.test_batch_size, # shuffle=False, num_workers=int(self.opts.test_workers), drop_last=False) assert self.opts.checkpoint_path is not None, "please specify the pre-trained weights!" self.net = Net3(self.opts).eval().to(self.opts.device) ckpt_dict = torch.load(self.opts.checkpoint_path) self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opts.device) if self.opts.start_from_latent_avg else None if self.opts.load_ema: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict_ema"],prefix="module.")) else: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Load pre-trained weights.") # # 重新保存一下 # torch.save(ckpt_dict,"./ckpt.pth",_use_new_zipfile_serialization=False) else: self.net = net # loss 函数 self.mse_loss = nn.MSELoss().to(self.opts.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.opts.device).eval() if self.opts.id_lambda > 0: self.id_loss = IDLoss(self.opts).to(self.opts.device).eval() if self.opts.face_parsing_lambda > 0: self.face_parsing_loss = FaceParsingLoss(self.opts).to(self.opts.device).eval() self.img_transform = transforms.Compose([TO_TENSOR, NORMALIZE]) self.label_transform_wo_converter = transforms.Compose([TO_TENSOR])
sys.path.append(".") sys.path.append("..") toPIL = transforms.ToPILImage() celelbAHQ_label_list = ['background','skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'] # supported_swap_comps= ['background','skin', 'nose', 'eye', # 'brow','ear', 'mouth','hair', # 'hat','ear_r','neck', 'cloth'] # 9个属性 faceParser_label_list = ['background', 'mouth', 'eyebrows', 'eyes', 'hair', 'nose', 'skin', 'ears', 'belowface'] def paste_image(coeffs, img, orig_image): pasted_image = orig_image.copy().convert('RGBA') projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, coeffs, Image.BILINEAR) pasted_image.paste(projected, (0, 0), mask=projected) return pasted_image def save_image(image, output_folder, image_name, image_index, ext='jpg'): if ext == 'jpeg' or ext == 'jpg': image = image.convert('RGB') folder = os.path.join(output_folder, image_name) os.makedirs(folder, exist_ok=True) image.save(os.path.join(folder, "%04d.%s"%(image_index,ext))) class Optimizer: def __init__(self, opts, net=None): self.opts = opts if net is None: """ self.test_ds = CelebAHQDataset(dataset_root=self.opts.dataset_root, mode="test", img_transform=transforms.Compose( [TO_TENSOR, NORMALIZE]), label_transform=transforms.Compose( [ MASK_CONVERT_TF_DETAILED,TO_TENSOR]), # MASK_CONVERT_TF, fraction=self.opts.ds_frac) print(f"Number of test samples: {len(self.test_ds)}") """ # self.test_dataloader = DataLoader(self.test_ds, batch_size=self.opts.test_batch_size, # shuffle=False, num_workers=int(self.opts.test_workers), drop_last=False) assert self.opts.checkpoint_path is not None, "please specify the pre-trained weights!" self.net = Net3(self.opts).eval().to(self.opts.device) ckpt_dict = torch.load(self.opts.checkpoint_path) self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opts.device) if self.opts.start_from_latent_avg else None if self.opts.load_ema: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict_ema"],prefix="module.")) else: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Load pre-trained weights.") # # 重新保存一下 # torch.save(ckpt_dict,"./ckpt.pth",_use_new_zipfile_serialization=False) else: self.net = net # loss 函数 self.mse_loss = nn.MSELoss().to(self.opts.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.opts.device).eval() if self.opts.id_lambda > 0: self.id_loss = IDLoss(self.opts).to(self.opts.device).eval() if self.opts.face_parsing_lambda > 0: self.face_parsing_loss = FaceParsingLoss(self.opts).to(self.opts.device).eval() self.img_transform = transforms.Compose([TO_TENSOR, NORMALIZE]) self.label_transform_wo_converter = transforms.Compose([TO_TENSOR])
self.label_transform_w_converter = transforms.Compose([MASK_CONVERT_TF_DETAILED, TO_TENSOR])
5
2023-10-15 12:15:01+00:00
16k
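Note: the checkpoint loading in the row above relies on torch_utils.remove_module_prefix to strip the "module." prefix that nn.DataParallel adds to every state-dict key. A minimal sketch of that common pattern, under the assumption that the repo's helper behaves like the dictionary comprehension below (the helper shown here is a stand-in, not the repo's implementation):

import torch
import torch.nn as nn

def remove_module_prefix(state_dict, prefix="module."):
    # drop the wrapper prefix so keys match the bare model again
    return {k[len(prefix):] if k.startswith(prefix) else k: v
            for k, v in state_dict.items()}

net = nn.Linear(4, 2)
# simulate a checkpoint saved from a DataParallel-wrapped model
ckpt = {"module." + k: v for k, v in net.state_dict().items()}
net.load_state_dict(remove_module_prefix(ckpt))  # loads cleanly again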
sotopia-lab/sotopia
examples/experiment_eval.py
[ { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ...
import asyncio import logging import os import subprocess import sys import gin from datetime import datetime from logging import FileHandler from typing import Any, Callable, Generator, Literal, Sequence, cast from absl import app, flags from rich import print from rich.logging import RichHandler from tqdm import tqdm from sotopia.agents import LLMAgent from sotopia.database import ( AgentProfile, EnvAgentComboStorage, EnvironmentProfile, EpisodeLog, ) from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, ) from sotopia.envs.parallel import ParallelSotopiaEnv from sotopia.generation_utils.generate import LLM_Name from sotopia.messages import AgentAction, Message, Observation from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, ) from sotopia.server import run_async_server from sotopia_conf.gin_utils import parse_gin_flags, run
12,523
_DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] FLAGS = flags.FLAGS # date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) env_ids: list[str] = list(EnvironmentProfile.all_pks()) assert all( isinstance(env_id, str) for env_id in env_ids ), "env_ids should be a list of strings" def check_existing_episodes( env_id: str, agent_ids: list[str], models: dict[str, LLM_Name], tag: str | None = None, ) -> bool: if tag: existing_episode = EpisodeLog.find( (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag) ).all() else: existing_episode = EpisodeLog.find( EpisodeLog.environment == env_id ).all() if existing_episode: for episode in existing_episode: assert isinstance( episode, EpisodeLog ), "episode should be an EpisodeLog" if episode.agents == agent_ids and episode.models == list( models.values() ): return True return False else: return False def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None:
_DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] FLAGS = flags.FLAGS # date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) env_ids: list[str] = list(EnvironmentProfile.all_pks()) assert all( isinstance(env_id, str) for env_id in env_ids ), "env_ids should be a list of strings" def check_existing_episodes( env_id: str, agent_ids: list[str], models: dict[str, LLM_Name], tag: str | None = None, ) -> bool: if tag: existing_episode = EpisodeLog.find( (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag) ).all() else: existing_episode = EpisodeLog.find( EpisodeLog.environment == env_id ).all() if existing_episode: for episode in existing_episode: assert isinstance( episode, EpisodeLog ), "episode should be an EpisodeLog" if episode.agents == agent_ids and episode.models == list( models.values() ): return True return False else: return False def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None:
sampler = ConstraintBasedSampler[Observation, AgentAction](
11
2023-10-23 19:47:26+00:00
16k
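Note: check_existing_episodes in the row above deduplicates runs by querying Redis-OM for episodes with the same environment, agent ids, and model list. A plain-Python sketch of the same logic, with the database swapped for an in-memory list so it runs standalone (EpisodeRecord is a hypothetical stand-in for sotopia's EpisodeLog):

from dataclasses import dataclass

@dataclass
class EpisodeRecord:                     # hypothetical stand-in for EpisodeLog
    environment: str
    agents: list
    models: list
    tag: str | None = None

EPISODES = [
    EpisodeRecord("env-1", ["a1", "a2"], ["gpt-4", "gpt-3.5-turbo"], tag="run-A"),
]

def check_existing_episodes(env_id, agent_ids, models, tag=None):
    # filter by environment (and tag, when given), then compare agents/models
    candidates = [e for e in EPISODES
                  if e.environment == env_id and (tag is None or e.tag == tag)]
    return any(e.agents == agent_ids and e.models == list(models.values())
               for e in candidates)

assert check_existing_episodes("env-1", ["a1", "a2"],
                               {"env": "gpt-4", "agent": "gpt-3.5-turbo"},
                               tag="run-A")

As in the original, the models dict is compared by insertion order of its values, so callers must build it consistently.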
uukuguy/multi_loras
multi_loras/slora/router/manager.py
[ { "identifier": "SamplingParams", "path": "multi_loras/slora/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n ...
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq, BatchTokenIdOut, AbortReq from .input_params import InputParams from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from .stats import Stats from .profiler import AlphaModel, BetaModel from .pets_req_queue import PETSReqQueue from .peft_req_queue import PEFTReqQueue from .cluster_req_queue import ClusterReqQueue from .abort_req_queue import AbortReqQueue from ..models.peft.lora_adapter import get_lora_config
13,943
for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch) self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] 
req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans): batch_out = BatchTokenIdOut() for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] batch_out.reqs_infs.append((req_id, new_token_id, new_gen_metadata, req.has_generate_finished, req.aborted)) self.send_to_detokenization.send_pyobj(batch_out) return async def loop_for_netio_req(self): while True: recv_req = await self.recv_from_httpserver.recv_pyobj() if isinstance(recv_req, tuple) and len(recv_req) == 4: adapter_dir, prompt_ids, sampling_params, request_id = recv_req self.add_req(adapter_dir, prompt_ids, sampling_params, request_id)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: self.req_queue = AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: self.req_queue = ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, 
request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ Event-handling loop """ # remove all reqs that have already finished if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_batch is not None: self.stats_tool.count_prompt_tokens(new_batch) self.running_batch = new_batch # load adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_batch.adapter_dirs)) await asyncio.gather(*ret) # merge adapter to base model if self.input_params.scheduler == "peft": torch.cuda.synchronize() ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].merge_adapter()) await asyncio.gather(*ret) torch.cuda.synchronize() await self._prefill_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens = 0 return if self.has_wait_tokens < self.max_wait_tokens: self.stats_tool.count_output_tokens(self.running_batch) # prefetch if (self.input_params.prefetch and (self.has_wait_tokens == self.max_wait_tokens // 2 or self.has_wait_tokens == self.max_wait_tokens - 3) and self.input_params.scheduler != "peft"): next_batch = self.req_queue.next_batch() if next_batch is not None: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters( next_batch.adapter_dirs, prefetch=True)) await asyncio.gather(*ret) await self._decode_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens += 1 return else: new_mini_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_mini_batch is not None: self.stats_tool.count_prompt_tokens(new_mini_batch) ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch) self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await
self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans): batch_out = BatchTokenIdOut() for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] batch_out.reqs_infs.append((req_id, new_token_id, new_gen_metadata, req.has_generate_finished, req.aborted)) self.send_to_detokenization.send_pyobj(batch_out) return async def loop_for_netio_req(self): while True: recv_req = await self.recv_from_httpserver.recv_pyobj() if isinstance(recv_req, tuple) and len(recv_req) == 4: adapter_dir, prompt_ids, sampling_params, request_id = recv_req self.add_req(adapter_dir, prompt_ids, sampling_params, request_id)
elif isinstance(recv_req, AbortReq):
5
2023-10-16 02:39:47+00:00
16k
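Note: a recurring pattern in the RouterManager above is issuing the same RPC to every tensor-parallel rank, awaiting all of them together with asyncio.gather, and consuming only rank 0's answer. A minimal self-contained sketch of that fan-out (FakeRpc is an illustrative stand-in for ModelRpcClient):

import asyncio

class FakeRpc:                           # illustrative stand-in for ModelRpcClient
    def __init__(self, rank):
        self.rank = rank

    async def decode_batch(self, batch_id):
        await asyncio.sleep(0.01)        # pretend RPC latency
        return {101: (7, {"rank": self.rank})}

async def main(world_size=4):
    rpcs = [FakeRpc(r) for r in range(world_size)]
    # fan the same call out to every rank and step them in lockstep
    rets = [rpc.decode_batch(batch_id=0) for rpc in rpcs]
    ans = await asyncio.gather(*rets)
    print(ans[0])                        # like the manager, only rank 0's answer is used

asyncio.run(main())

Gathering keeps all ranks synchronized per step, which is why every helper in the manager ends with a single await asyncio.gather(*rets).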
MobileLLM/AutoDroid
droidbot/input_manager.py
[ { "identifier": "EventLog", "path": "droidbot/input_event.py", "snippet": "class EventLog(object):\n \"\"\"\n save an event to local file system\n \"\"\"\n\n def __init__(self, device, app, event, profiling_method=None, tag=None):\n self.device = device\n self.app = app\n ...
import json import logging import subprocess import time from .input_event import EventLog from .input_policy import UtgBasedInputPolicy, UtgNaiveSearchPolicy, UtgGreedySearchPolicy, \ UtgReplayPolicy, \ ManualPolicy, TaskPolicy, \ POLICY_NAIVE_DFS, POLICY_GREEDY_DFS, \ POLICY_NAIVE_BFS, POLICY_GREEDY_BFS, \ POLICY_REPLAY, POLICY_MEMORY_GUIDED, \ POLICY_MANUAL, POLICY_MONKEY, POLICY_NONE, POLICY_TASK from .input_script import DroidBotScript from .input_policy2 import MemoryGuidedPolicy
13,475
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master): if self.policy_name == POLICY_NONE: input_policy = None elif self.policy_name == POLICY_MONKEY: input_policy = None elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]: input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]:
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master): if self.policy_name == POLICY_NONE: input_policy = None elif self.policy_name == POLICY_MONKEY: input_policy = None elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]: input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]:
input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name)
3
2023-10-23 03:32:58+00:00
16k
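Note: get_input_policy in the row above maps a policy-name string to a policy object through an if/elif chain. One equivalent, easier-to-extend shape is a lookup table; the sketch below uses placeholder policy classes and constants, not droidbot's real ones:

POLICY_NAIVE_DFS = "dfs_naive"
POLICY_GREEDY_DFS = "dfs_greedy"
POLICY_MONKEY = "monkey"

class NaiveSearchPolicy:                 # placeholder, not droidbot's class
    def __init__(self, name):
        self.name = name

class GreedySearchPolicy:                # placeholder, not droidbot's class
    def __init__(self, name):
        self.name = name

POLICY_TABLE = {
    POLICY_NAIVE_DFS: NaiveSearchPolicy,
    POLICY_GREEDY_DFS: GreedySearchPolicy,
    POLICY_MONKEY: None,                 # monkey events are generated externally
}

def get_input_policy(policy_name):
    if policy_name not in POLICY_TABLE:
        raise ValueError(f"unknown policy: {policy_name}")
    cls = POLICY_TABLE[policy_name]
    return cls(policy_name) if cls else None

print(type(get_input_policy(POLICY_GREEDY_DFS)).__name__)   # GreedySearchPolicy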
openvpi/SingingVocoders
training/univnet_nsf_msd.py
[ { "identifier": "MultiScaleDiscriminator", "path": "models/nsf_HiFigan/models.py", "snippet": "class MultiScaleDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiScaleDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList(\n [\n D...
import pathlib import random import numpy as np import torch.utils.data import utils from matplotlib import pyplot as plt from torch import nn from torch.utils.data import Dataset from models.nsf_HiFigan.models import MultiScaleDiscriminator from models.nsf_univnet.nsfunivnet import nsfUnivNet from modules.loss.nsf_univloss import nsf_univloss from modules.loss.nsf_univloss_msd import nsf_univloss_msd from modules.univ_D.discriminator import MultiPeriodDiscriminator, MultiResSpecDiscriminator from training.base_task_gan import GanBaseTask from utils.wav2mel import PitchAdjustableMelSpectrogram
12,972
def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] del record['uv'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] record['uv']=record['uv'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] record['uv']=record['uv'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) uv=np.stack([record['uv'] for record in minibatch if 'uv' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),'uv':torch.from_numpy(uv) } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec
# import logging # import os # import sys # from typing import Dict # # import lightning.pytorch as pl # import matplotlib # from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only # from torchmetrics import Metric, MeanMetric # from models.ddsp_univnet.ddspunivnet import ddspUnivNet # from models.univnet.univnet import UnivNet # from models.lvc_ddspgan.lvc_ddspgan import DDSPgan # from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator # from modules.loss.ddsp_univloss import ddsp_univloss def spec_to_figure(spec, vmin=None, vmax=None): if isinstance(spec, torch.Tensor): spec = spec.cpu().numpy() fig = plt.figure(figsize=(12, 9),dpi=100) plt.pcolor(spec.T, vmin=vmin, vmax=vmax) plt.tight_layout() return fig class ddsp_univ_dataset(Dataset): def __init__(self, config: dict, data_dir, infer=False): super().__init__() self.config = config self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir) with open(self.data_dir, 'r', encoding='utf8') as f: fills = f.read().strip().split('\n') self.data_index = fills self.infer = infer self.volume_aug = self.config['volume_aug'] self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0 def __getitem__(self, index): data_path = self.data_index[index] data = np.load(data_path) return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio'],'uv':data['uv']} def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] del record['uv'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] record['uv']=record['uv'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] record['uv']=record['uv'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) 
uv=np.stack([record['uv'] for record in minibatch if 'uv' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),'uv':torch.from_numpy(uv) } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec
class nsf_univnet_task(GanBaseTask):
6
2023-10-17 13:45:09+00:00
16k
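Note: the collater in the row above keeps waveform and mel-spectrogram aligned by choosing the crop window in mel frames and multiplying the boundaries by hop_size (samples_per_frame) to cut the matching audio span. A minimal numpy sketch of that frame-aligned random crop; shapes and hop size are illustrative:

import random
import numpy as np

hop_size, crop_frames = 256, 32          # illustrative values
n_frames = 100
mel = np.random.randn(n_frames, 80).astype(np.float32)
audio = np.random.randn(n_frames * hop_size).astype(np.float32)

# pick the window in mel frames, then scale to samples so both stay aligned
start = random.randint(0, n_frames - 1 - crop_frames)
end = start + crop_frames
mel_crop = mel[start:end].T              # (n_mels, crop_frames)
audio_crop = audio[start * hop_size:end * hop_size]

assert audio_crop.shape[0] == crop_frames * hop_size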
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.u...
import os import shutil import tempfile import math import dill import torch import torch.distributed as dist from datetime import datetime, timedelta from typing import Iterable, Union from gec.data import Dataset from gec.fn import map_token_ids from supar.parser import Parser from supar.utils import Config from supar.utils.common import MIN, NUL, UNK from supar.utils.field import RawField from supar.utils.fn import set_rng_state from supar.utils.logging import get_logger, init_logger, progress_bar from supar.utils.metric import Metric from supar.utils.optim import PolynomialLR from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist from supar.utils.parallel import is_master from supar.utils.tokenizer import TransformerTokenizer from supar.utils.transform import AttachJuxtaposeTree, Batch from torch.cuda.amp import GradScaler from torch.optim import AdamW from torch.optim.lr_scheduler import ExponentialLR from torch.nn.functional import embedding from .metric import PerplexityMetric, SpanMetric from .model import Seq2SeqDetectModel, Seq2SeqModel from .transform import Field, Text, Tree from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from transformers import AutoTokenizer, GPT2LMHeadModel
12,034
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq'
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq'
MODEL = Seq2SeqModel
5
2023-10-18 10:55:33+00:00
16k
jianlanluo/SAQ
vqn/conservative_sac_main.py
[ { "identifier": "VQN", "path": "vqn/vqn.py", "snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantiza...
import os import time import uuid import numpy as np import pprint import jax import jax.numpy as jnp import flax import gym import d4rl import absl.app import absl.flags from copy import deepcopy from .vqn import VQN from .conservative_sac import ConservativeSAC from .replay_buffer import get_d4rl_dataset, subsample_batch from .jax_utils import batch_to_jax from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy from .sampler import StepSampler, TrajSampler from .robomimic_utils import ( SequenceDataset, make_dataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env, ENV_TO_HORIZON_MAP, OBS_KEYS ) from .utils import ( Timer, define_flags_with_default, set_random_seed, print_flags, get_user_flags, prefix_metrics, WandBLogger ) from viskit.logging import logger, setup_logger
11,036
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch} with Timer() as train_timer: for batch_idx in range(FLAGS.n_train_step_per_epoch): batch = dataset.sample(FLAGS.batch_size)
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch} with Timer() as train_timer: for batch_idx in range(FLAGS.n_train_step_per_epoch): batch = dataset.sample(FLAGS.batch_size)
metrics.update(prefix_metrics(sac.train(batch, bc=epoch < FLAGS.bc_epochs), 'sac'))
16
2023-10-18 06:31:20+00:00
16k
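Note: main() in the row above preprocesses the offline dataset before training: rewards are rescaled affinely and actions are clipped slightly inside [-1, 1]. A tiny numpy sketch of that step; the numbers mirror the flag defaults (reward_scale=1.0, reward_bias=0.0, clip_action=0.999) and the toy dataset is illustrative:

import numpy as np

reward_scale, reward_bias, clip_action = 1.0, 0.0, 0.999   # the defaults above

dataset = {                               # toy stand-in for the D4RL dict
    "rewards": np.array([0.5, -1.2, 3.0]),
    "actions": np.array([[1.0, -1.0], [0.3, 0.999], [-1.5, 0.2]]),
}
dataset["rewards"] = dataset["rewards"] * reward_scale + reward_bias
dataset["actions"] = np.clip(dataset["actions"], -clip_action, clip_action)

print(dataset["actions"].min(), dataset["actions"].max())   # within [-0.999, 0.999]

Clipping just inside the action bound avoids infinities when a tanh-squashed Gaussian policy evaluates log-probabilities at the dataset actions.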
SLDGroup/G-CASCADE
lib/networks.py
[ { "identifier": "pvt_v2_b2", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b2(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b2, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n ...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import timm import logging from scipy import ndimage from lib.pvtv2 import pvt_v2_b2, pvt_v2_b5, pvt_v2_b0 from lib.decoders import CUP, CASCADE, CASCADE_Cat, GCUP, GCUP_Cat, GCASCADE, GCASCADE_Cat from lib.pyramid_vig import pvig_ti_224_gelu, pvig_s_224_gelu, pvig_m_224_gelu, pvig_b_224_gelu from lib.maxxvit_4out import maxvit_tiny_rw_224 as maxvit_tiny_rw_224_4out from lib.maxxvit_4out import maxvit_rmlp_tiny_rw_256 as maxvit_rmlp_tiny_rw_256_4out from lib.maxxvit_4out import maxxvit_rmlp_small_rw_256 as maxxvit_rmlp_small_rw_256_4out from lib.maxxvit_4out import maxvit_rmlp_small_rw_224 as maxvit_rmlp_small_rw_224_4out
13,348
('GCUP_decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight
logger = logging.getLogger(__name__) def np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class PVT_CUP(nn.Module): def __init__(self, n_class=1): super(PVT_CUP, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CUP(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CUP decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CASCADE(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE_Cat(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE_Cat, self).__init__() 
# conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) print('Model %s created, param count: %d' % ('PVT backbone: ', sum([m.numel() for m in self.backbone.parameters()]))) # decoder initialization self.decoder = CASCADE_Cat(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE_Cat decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCUP(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCUP, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCUP_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCUP_decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight
self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]
9
2023-10-24 17:49:10+00:00
16k
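A hedged aside, not part of the dataset record above: every PVT_* class in that file loads its pretrained backbone by filtering the checkpoint's state_dict against the model's own keys, so checkpoint entries the model lacks (e.g. the pretrained classifier head) are dropped silently. A minimal self-contained sketch of that idiom — the checkpoint path is illustrative, and the shape check is an addition the original omits:

import torch
import torch.nn as nn

def load_pretrained_backbone(model: nn.Module, path: str) -> nn.Module:
    # Assumption: the checkpoint file is a raw state_dict, as pvt_v2_b2.pth is used above.
    checkpoint = torch.load(path, map_location='cpu')
    model_dict = model.state_dict()
    # Keep only tensors whose names and shapes match the target model.
    filtered = {k: v for k, v in checkpoint.items()
                if k in model_dict and v.shape == model_dict[k].shape}
    model_dict.update(filtered)
    model.load_state_dict(model_dict)
    return model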
boppreh/hello_tls
src/hello_tls/scan.py
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _ma...
from enum import Enum from multiprocessing.pool import ThreadPool from typing import Iterable, Union, List, Optional, Iterator, Callable, Any from urllib.parse import urlparse from datetime import datetime, timezone from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod from OpenSSL import SSL, crypto import socket import re import dataclasses import ssl, select
14,302
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello: """ Sends a Client Hello to the server, and returns the parsed ServerHello. Raises exceptions for the different alert messages the server can send. """ sock = make_socket(connection_settings) sock.send(make_client_hello(client_hello)) packet_stream = iter(lambda: sock.recv(4096), b'') server_hello = parse_server_hello(packet_stream) if server_hello.version not in client_hello.protocols: # Server picked a protocol we didn't ask for.
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello: """ Sends a Client Hello to the server, and returns the parsed ServerHello. Raises exceptions for the different alert messages the server can send. """ sock = make_socket(connection_settings) sock.send(make_client_hello(client_hello)) packet_stream = iter(lambda: sock.recv(4096), b'') server_hello = parse_server_hello(packet_stream) if server_hello.version not in client_hello.protocols: # Server picked a protocol we didn't ask for.
logger.info(f"Server attempted to downgrade protocol to unsupported version {server_hello.version}")
0
2023-10-21 02:00:13+00:00
16k
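A hedged aside, not part of the dataset record above: send_hello() drains the socket with the two-argument iter(callable, sentinel) form, which keeps calling recv(4096) until it returns b'' (the peer closed the connection). A minimal sketch of that idiom in isolation; the function name is illustrative:

import socket

def read_until_close(sock: socket.socket) -> bytes:
    # iter() with a sentinel turns the blocking recv loop into a lazy stream of chunks.
    chunks = []
    for packet in iter(lambda: sock.recv(4096), b''):
        chunks.append(packet)
    return b''.join(chunks)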
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/vit/sam/modules/mask_generator.py
[ { "identifier": "MaskData", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kw...
from typing import Any, Dict, List, Optional, Tuple from torchvision.ops.boxes import batched_nms, box_area # type: ignore from ..amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points) from .prompt_predictor import PromptPredictor from .sam import Sam from pycocotools import mask as mask_utils # type: ignore # noqa: F401 import numpy as np import torch import cv2 # type: ignore # noqa: F401
11,298
return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = 'binary_mask', ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int, None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray), None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != (point_grids is None), \ 'Exactly one of points_per_side or point_grid must be provided.' 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in {'binary_mask', 'uncompressed_rle', 'coco_rle'}, f'Unknown output_mode {output_mode}.' if output_mode == 'coco_rle': if min_mask_region_area > 0: self.predictor = PromptPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode # TODO: Temporary implementation for compatibility def __call__(self, image: np.ndarray, augment=False, visualize=False) -> List[Dict[str, Any]]: return self.generate(image) @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any), np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == 'coco_rle': mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] elif self.output_mode == 'binary_mask': mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] else: mask_data['segmentations'] = mask_data['rles'] # Write mask records curr_anns = [] for idx in range(len(mask_data['segmentations'])): ann = { 'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
data['masks'] = uncrop_masks(data['masks'], crop_box, orig_h, orig_w)
14
2023-10-24 00:45:55+00:00
16k
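A hedged aside, not part of the dataset record above: _process_crop() deduplicates masks with class-agnostic NMS by handing batched_nms a constant zero tensor as the category index, which places every box in one shared class. A small self-contained check of that trick, with illustrative boxes and threshold:

import torch
from torchvision.ops.boxes import batched_nms

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [0.5, 0.5, 10.5, 10.5],    # IoU ~0.82 with the first box
                      [50.0, 50.0, 60.0, 60.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = batched_nms(boxes, scores,
                   torch.zeros_like(boxes[:, 0]),  # one shared "category", as above
                   iou_threshold=0.7)
print(keep)  # tensor([0, 2]): the overlapping lower-scoring box is suppressed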
bytedance/ColTrack
models/dino/dino.py
[ { "identifier": "box_ops", "path": "util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(m...
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from util import box_ops from util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid, scale_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
11,436
# is the maximum id for a class in your dataset. For example, # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. # As another example, for a dataset that has a single class with id 1, # you should pass `num_classes` to be 2 (max_obj_id + 1). # For more details on this, check the following discussion # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 # num_classes = 20 if args.dataset_file != 'coco' else 91 # if args.dataset_file == "coco_panoptic": # # for panoptic, we just add a num_classes that is large enough to hold # # max_obj_id + 1, but the exact value doesn't really matter # num_classes = 250 # if args.dataset_file == 'o365': # num_classes = 366 # if args.dataset_file == 'vanke': # num_classes = 51 num_classes = args.num_classes device = torch.device(args.device) backbone = build_backbone(args) transformer = build_deformable_transformer(args) try: match_unstable_error = args.match_unstable_error dn_labelbook_size = args.dn_labelbook_size except: match_unstable_error = True dn_labelbook_size = num_classes try: dec_pred_class_embed_share = args.dec_pred_class_embed_share except: dec_pred_class_embed_share = True try: dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share except: dec_pred_bbox_embed_share = True model = DINO( backbone, transformer, num_classes=num_classes, num_queries=args.num_queries, aux_loss=True, iter_update=True, query_dim=4, random_refpoints_xy=args.random_refpoints_xy, fix_refpoints_hw=args.fix_refpoints_hw, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_class_embed_share=dec_pred_class_embed_share, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, # two stage two_stage_type=args.two_stage_type, # box_share two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, decoder_sa_type=args.decoder_sa_type, num_patterns=args.num_patterns, dn_number = args.dn_number if args.use_dn else 0, dn_box_noise_scale = args.dn_box_noise_scale, dn_label_noise_ratio = args.dn_label_noise_ratio, dn_labelbook_size = dn_labelbook_size, ) if args.masks: model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) matcher = build_matcher(args) # prepare weight dict weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef} weight_dict['loss_giou'] = args.giou_loss_coef clean_weight_dict_wo_dn = copy.deepcopy(weight_dict) # for DN training if args.use_dn: weight_dict['loss_ce_dn'] = args.cls_loss_coef weight_dict['loss_bbox_dn'] = args.bbox_loss_coef weight_dict['loss_giou_dn'] = args.giou_loss_coef if args.masks: weight_dict["loss_mask"] = args.mask_loss_coef weight_dict["loss_dice"] = args.dice_loss_coef clean_weight_dict = copy.deepcopy(weight_dict) # TODO this is a hack if args.aux_loss: aux_weight_dict = {} for i in range(args.dec_layers - 1): aux_weight_dict.update({k + f'_{i}': v for k, v in clean_weight_dict.items()}) weight_dict.update(aux_weight_dict) if args.two_stage_type != 'no': interm_weight_dict = {} try: no_interm_box_loss = args.no_interm_box_loss except: no_interm_box_loss = False _coeff_weight_dict = { 'loss_ce': 1.0, 'loss_bbox': 1.0 if not no_interm_box_loss else 0.0, 'loss_giou': 1.0 if not no_interm_box_loss else 0.0, } try: interm_loss_coef = args.interm_loss_coef except: interm_loss_coef = 1.0 interm_weight_dict.update({k + f'_interm': v * interm_loss_coef * _coeff_weight_dict[k] for k, v in clean_weight_dict_wo_dn.items()}) weight_dict.update(interm_weight_dict) losses = ['labels', 'boxes', 
'cardinality'] if args.masks: losses += ["masks"] criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, focal_alpha=args.focal_alpha, losses=losses, ) criterion.to(device) postprocessors = {'bbox': PostProcess(num_select=args.num_select, nms_iou_threshold=args.nms_iou_threshold)} if args.masks:
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] # self.replace_sa_with_double_ca = replace_sa_with_double_ca if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): raise NotImplementedError def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. 
It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0]+=self.label_enc.weight[0,0]*0.0 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = scale_sigmoid(layer_outputs_unsig.sigmoid()) outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) # outputs_class = self.class_embed(hs) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs # import ipdb; ipdb.set_trace() if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = scale_sigmoid(layer_enc_outputs_coord_unsig.sigmoid()) layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1]) # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1] # enc_outputs_coord = 
enc_outputs_unsig.sigmoid() # enc_outputs_class = self.enc_class_embed(hs_enc[:-1]) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, # 'dn_labels': self.loss_dn_labels, # 'dn_boxes': self.loss_dn_boxes } assert loss in loss_map, f'do you really want to compute {loss} loss?' 
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device) if is_dist_avail_and_initialized(): torch.distributed.all_reduce(num_boxes) num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() # Compute all the requested losses losses = {} # prepare for dn loss dn_meta = outputs['dn_meta'] if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: output_known_lbs_bboxes,single_pad, scalar = self.prep_for_dn(dn_meta) dn_pos_idx = [] dn_neg_idx = [] for i in range(len(targets)): if len(targets[i]['labels']) > 0: t = torch.range(0, len(targets[i]['labels']) - 1).long().cuda() t = t.unsqueeze(0).repeat(scalar, 1) tgt_idx = t.flatten() output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t output_idx = output_idx.flatten() else: output_idx = tgt_idx = torch.tensor([]).long().cuda() dn_pos_idx.append((output_idx, tgt_idx)) dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx)) output_known_lbs_bboxes=dn_meta['output_known_lbs_bboxes'] l_dict = {} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes*scalar,**kwargs)) l_dict = {k + f'_dn': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') losses.update(l_dict) for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if 'aux_outputs' in outputs: for idx, aux_outputs in enumerate(outputs['aux_outputs']): indices = self.matcher(aux_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx] l_dict={} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar, **kwargs)) l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = self.matcher(interm_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) # enc output loss if 'enc_outputs' in outputs: for i, enc_outputs in enumerate(outputs['enc_outputs']): indices = self.matcher(enc_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses def prep_for_dn(self,dn_meta): output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes'] num_dn_groups,pad_size=dn_meta['num_dn_group'],dn_meta['pad_size'] assert pad_size % num_dn_groups==0 single_pad=pad_size//num_dn_groups return output_known_lbs_bboxes,single_pad,num_dn_groups class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100, nms_iou_threshold=-1) -> None: super().__init__() self.num_select = num_select self.nms_iou_threshold = nms_iou_threshold @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) if test: assert not not_to_xyxy boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] # import ipdb; ipdb.set_trace() results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results @MODULE_BUILD_FUNCS.registe_with_name(module_name='dino') def build_dino(args): # the `num_classes` naming here is somewhat misleading. # it indeed corresponds to `max_obj_id + 1`, where max_obj_id # is the maximum id for a class in your dataset. For example, # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. # As another example, for a dataset that has a single class with id 1, # you should pass `num_classes` to be 2 (max_obj_id + 1). 
# For more details on this, check the following discussion # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 # num_classes = 20 if args.dataset_file != 'coco' else 91 # if args.dataset_file == "coco_panoptic": # # for panoptic, we just add a num_classes that is large enough to hold # # max_obj_id + 1, but the exact value doesn't really matter # num_classes = 250 # if args.dataset_file == 'o365': # num_classes = 366 # if args.dataset_file == 'vanke': # num_classes = 51 num_classes = args.num_classes device = torch.device(args.device) backbone = build_backbone(args) transformer = build_deformable_transformer(args) try: match_unstable_error = args.match_unstable_error dn_labelbook_size = args.dn_labelbook_size except: match_unstable_error = True dn_labelbook_size = num_classes try: dec_pred_class_embed_share = args.dec_pred_class_embed_share except: dec_pred_class_embed_share = True try: dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share except: dec_pred_bbox_embed_share = True model = DINO( backbone, transformer, num_classes=num_classes, num_queries=args.num_queries, aux_loss=True, iter_update=True, query_dim=4, random_refpoints_xy=args.random_refpoints_xy, fix_refpoints_hw=args.fix_refpoints_hw, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_class_embed_share=dec_pred_class_embed_share, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, # two stage two_stage_type=args.two_stage_type, # box_share two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, decoder_sa_type=args.decoder_sa_type, num_patterns=args.num_patterns, dn_number = args.dn_number if args.use_dn else 0, dn_box_noise_scale = args.dn_box_noise_scale, dn_label_noise_ratio = args.dn_label_noise_ratio, dn_labelbook_size = dn_labelbook_size, ) if args.masks: model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) matcher = build_matcher(args) # prepare weight dict weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef} weight_dict['loss_giou'] = args.giou_loss_coef clean_weight_dict_wo_dn = copy.deepcopy(weight_dict) # for DN training if args.use_dn: weight_dict['loss_ce_dn'] = args.cls_loss_coef weight_dict['loss_bbox_dn'] = args.bbox_loss_coef weight_dict['loss_giou_dn'] = args.giou_loss_coef if args.masks: weight_dict["loss_mask"] = args.mask_loss_coef weight_dict["loss_dice"] = args.dice_loss_coef clean_weight_dict = copy.deepcopy(weight_dict) # TODO this is a hack if args.aux_loss: aux_weight_dict = {} for i in range(args.dec_layers - 1): aux_weight_dict.update({k + f'_{i}': v for k, v in clean_weight_dict.items()}) weight_dict.update(aux_weight_dict) if args.two_stage_type != 'no': interm_weight_dict = {} try: no_interm_box_loss = args.no_interm_box_loss except: no_interm_box_loss = False _coeff_weight_dict = { 'loss_ce': 1.0, 'loss_bbox': 1.0 if not no_interm_box_loss else 0.0, 'loss_giou': 1.0 if not no_interm_box_loss else 0.0, } try: interm_loss_coef = args.interm_loss_coef except: interm_loss_coef = 1.0 interm_weight_dict.update({k + f'_interm': v * interm_loss_coef * _coeff_weight_dict[k] for k, v in clean_weight_dict_wo_dn.items()}) weight_dict.update(interm_weight_dict) losses = ['labels', 'boxes', 'cardinality'] if args.masks: losses += ["masks"] criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, focal_alpha=args.focal_alpha, losses=losses, ) criterion.to(device) postprocessors = {'bbox': 
PostProcess(num_select=args.num_select, nms_iou_threshold=args.nms_iou_threshold)} if args.masks:
postprocessors['segm'] = PostProcessSegm()
13
2023-10-16 02:18:33+00:00
16k
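The PostProcess module in the record above ranks detections by flattening the (query, class) score grid and taking a global top-k; integer division and modulo on the flat indices then recover the query index and the class label. Below is a minimal sketch of that selection step, assuming sigmoid scores of shape [batch, num_queries, num_classes]; the function name and the concrete shapes are illustrative, not part of the record.

import torch

def select_topk(prob: torch.Tensor, num_select: int = 100):
    bs, num_queries, num_classes = prob.shape
    # Flatten queries and classes into one axis and take a global top-k.
    topk_values, topk_indexes = torch.topk(prob.view(bs, -1), num_select, dim=1)
    topk_boxes = topk_indexes // num_classes  # which query each score came from
    labels = topk_indexes % num_classes       # which class within that query
    return topk_values, topk_boxes, labels

scores, box_idx, labels = select_topk(torch.rand(2, 900, 91), num_select=5)
print(box_idx.shape, labels.shape)  # torch.Size([2, 5]) torch.Size([2, 5])

In the record, the recovered query indices are then repeated across the four box coordinates and used with torch.gather to pull the matching boxes before rescaling them to absolute image coordinates.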
YuroFR/freqtrade-modded-crypto-trading-bot
tests/commands/test_commands.py
[ { "identifier": "start_convert_data", "path": "freqtrade/commands/data_commands.py", "snippet": "def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:\n \"\"\"\n Convert data from one format to another\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EX...
import json import re import pytest from datetime import datetime, timedelta from io import BytesIO from pathlib import Path from unittest.mock import MagicMock, PropertyMock from zipfile import ZipFile from freqtrade.commands import (start_backtesting_show, start_convert_data, start_convert_trades, start_create_userdir, start_download_data, start_hyperopt_list, start_hyperopt_show, start_install_ui, start_list_data, start_list_exchanges, start_list_markets, start_list_strategies, start_list_timeframes, start_new_strategy, start_show_trades, start_strategy_update, start_test_pairlist, start_trading, start_webserver) from freqtrade.commands.db_commands import start_convert_db from freqtrade.commands.deploy_commands import (clean_ui_subdir, download_and_install_ui, get_ui_download_url, read_ui_version) from freqtrade.commands.list_commands import start_list_freqAI_models from freqtrade.configuration import setup_utils_configuration from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.persistence.models import init_db from freqtrade.persistence.pairlock_middleware import PairLocks from freqtrade.util import dt_floor_day, dt_now, dt_utc from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, create_mock_trades, get_args, log_has, log_has_re, patch_exchange, patched_configuration_load_config_file) from tests.conftest_trades import MOCK_TRADE_COUNT
13,096
captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active market with LTC as base currency and " "with USDT, NONEXISTENT as quote currencies: XLTCUSDT.\n" in captured.out) # active markets, base=LTC, quote=NONEXISTENT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test tabular output args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 12 active markets:\n" in captured.out) # Test tabular output, no markets found args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "NONEXISTENT", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test --print-json args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-json" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ('["ADA/USDT:USDT","BLK/BTC","ETH/BTC","ETH/USDT","ETH/USDT:USDT",' '"LTC/BTC","LTC/ETH","LTC/USD","NEO/BTC","TKN/BTC","XLTCUSDT","XRP/BTC"]' in captured.out) # Test --print-csv args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-csv" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Id,Symbol,Base,Quote,Active,Spot,Margin,Future,Leverage" in captured.out) assert ("blkbtc,BLK/BTC,BLK,BTC,True,Spot" in captured.out) assert ("USD-LTC,LTC/USD,LTC,USD,True,Spot" in captured.out) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert re.search(r"^BLK/BTC$", captured.out, re.MULTILINE) assert re.search(r"^LTC/USD$", captured.out, re.MULTILINE) mocker.patch(f'{EXMS}.markets', PropertyMock(side_effect=ValueError)) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] with pytest.raises(OperationalException, match=r"Cannot get markets.*"): start_list_markets(get_args(args), False) def test_create_datadir_failed(caplog): args = [ "create-userdir", ] with pytest.raises(SystemExit): start_create_userdir(get_args(args)) assert log_has("`create-userdir` requires --userdir to be set.", caplog) def test_create_datadir(caplog, mocker): cud = mocker.patch("freqtrade.commands.deploy_commands.create_userdata_dir", MagicMock()) csf = mocker.patch("freqtrade.commands.deploy_commands.copy_sample_files", MagicMock()) args = [ "create-userdir", "--userdir", "/temp/freqtrade/test" ] start_create_userdir(get_args(args)) assert cud.call_count == 1 assert csf.call_count == 1 def test_start_new_strategy(mocker, caplog): wt_mock = mocker.patch.object(Path, "write_text", MagicMock()) mocker.patch.object(Path, "exists", MagicMock(return_value=False)) args = [ "new-strategy", "--strategy", "CoolNewStrategy" ]
def test_setup_utils_configuration(): args = [ 'list-exchanges', '--config', 'config_examples/config_bittrex.example.json', ] config = setup_utils_configuration(get_args(args), RunMode.OTHER) assert "exchange" in config assert config['dry_run'] is True def test_start_trading_fail(mocker, caplog): mocker.patch("freqtrade.worker.Worker.run", MagicMock(side_effect=OperationalException)) mocker.patch("freqtrade.worker.Worker.__init__", MagicMock(return_value=None)) exitmock = mocker.patch("freqtrade.worker.Worker.exit", MagicMock()) args = [ 'trade', '-c', 'config_examples/config_bittrex.example.json' ] start_trading(get_args(args)) assert exitmock.call_count == 1 exitmock.reset_mock() caplog.clear() mocker.patch("freqtrade.worker.Worker.__init__", MagicMock(side_effect=OperationalException)) start_trading(get_args(args)) assert exitmock.call_count == 0 assert log_has('Fatal exception!', caplog) def test_start_webserver(mocker, caplog): api_server_mock = mocker.patch("freqtrade.rpc.api_server.ApiServer", ) args = [ 'webserver', '-c', 'config_examples/config_bittrex.example.json' ] start_webserver(get_args(args)) assert api_server_mock.call_count == 1 def test_list_exchanges(capsys): args = [ "list-exchanges", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.match(r"Exchanges available for Freqtrade.*", captured.out) assert re.search(r".*binance.*", captured.out) assert re.search(r".*bittrex.*", captured.out) # Test with --one-column args = [ "list-exchanges", "--one-column", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.search(r"^binance$", captured.out, re.MULTILINE) assert re.search(r"^bittrex$", captured.out, re.MULTILINE) # Test with --all args = [ "list-exchanges", "--all", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.match(r"All exchanges supported by the ccxt library.*", captured.out) assert re.search(r".*binance.*", captured.out) assert re.search(r".*bittrex.*", captured.out) assert re.search(r".*bitmex.*", captured.out) # Test with --one-column --all args = [ "list-exchanges", "--one-column", "--all", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.search(r"^binance$", captured.out, re.MULTILINE) assert re.search(r"^bittrex$", captured.out, re.MULTILINE) assert re.search(r"^bitmex$", captured.out, re.MULTILINE) def test_list_timeframes(mocker, capsys): api_mock = MagicMock() api_mock.timeframes = {'1m': 'oneMin', '5m': 'fiveMin', '30m': 'thirtyMin', '1h': 'hour', '1d': 'day', } patch_exchange(mocker, api_mock=api_mock, id='bittrex') args = [ "list-timeframes", ] pargs = get_args(args) pargs['config'] = None with pytest.raises(OperationalException, match=r"This command requires a configured exchange.*"): start_list_timeframes(pargs) # Test with --config config_examples/config_bittrex.example.json args = [ "list-timeframes", '--config', 'config_examples/config_bittrex.example.json', ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.match("Timeframes available for the exchange `Bittrex`: " "1m, 5m, 30m, 1h, 1d", captured.out) # Test with --exchange bittrex args = [ "list-timeframes", "--exchange", "bittrex", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.match("Timeframes available for the exchange `Bittrex`: " "1m, 5m, 30m, 1h, 1d", captured.out) api_mock.timeframes = {'1m': '1m', '5m': '5m', '15m': '15m', '30m': '30m', '1h': '1h', '6h': '6h', '12h': '12h', '1d': '1d', '3d': '3d', } 
patch_exchange(mocker, api_mock=api_mock, id='binance') # Test with --exchange binance args = [ "list-timeframes", "--exchange", "binance", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.match("Timeframes available for the exchange `Binance`: " "1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d, 3d", captured.out) # Test with --one-column args = [ "list-timeframes", '--config', 'config_examples/config_bittrex.example.json', "--one-column", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.search(r"^1m$", captured.out, re.MULTILINE) assert re.search(r"^5m$", captured.out, re.MULTILINE) assert re.search(r"^1h$", captured.out, re.MULTILINE) assert re.search(r"^1d$", captured.out, re.MULTILINE) # Test with --exchange binance --one-column args = [ "list-timeframes", "--exchange", "binance", "--one-column", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.search(r"^1m$", captured.out, re.MULTILINE) assert re.search(r"^5m$", captured.out, re.MULTILINE) assert re.search(r"^1h$", captured.out, re.MULTILINE) assert re.search(r"^1d$", captured.out, re.MULTILINE) def test_list_markets(mocker, markets_static, capsys): api_mock = MagicMock() patch_exchange(mocker, api_mock=api_mock, id='bittrex', mock_markets=markets_static) # Test with no --config args = [ "list-markets", ] pargs = get_args(args) pargs['config'] = None with pytest.raises(OperationalException, match=r"This command requires a configured exchange.*"): start_list_markets(pargs, False) # Test with --config config_examples/config_bittrex.example.json args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 12 active markets: " "ADA/USDT:USDT, BLK/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, " "LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" in captured.out) patch_exchange(mocker, api_mock=api_mock, id="binance", mock_markets=markets_static) # Test with --exchange args = [ "list-markets", "--exchange", "binance" ] pargs = get_args(args) pargs['config'] = None start_list_markets(pargs, False) captured = capsys.readouterr() assert re.match("\nExchange Binance has 12 active markets:\n", captured.out) patch_exchange(mocker, api_mock=api_mock, id="bittrex", mock_markets=markets_static) # Test with --all: all markets args = [ "list-markets", "--all", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 14 markets: " "ADA/USDT:USDT, BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, " "LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" in captured.out) # Test list-pairs subcommand: active pairs args = [ "list-pairs", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() assert ("Exchange Bittrex has 9 active pairs: " "BLK/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XRP/BTC.\n" in captured.out) # Test list-pairs subcommand with --all: all pairs args = [ "list-pairs", "--all", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() assert ("Exchange Bittrex has 11 pairs: " "BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, 
NEO/BTC, " "TKN/BTC, XRP/BTC.\n" in captured.out) # active markets, base=ETH, LTC args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "ETH", "LTC", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 7 active markets with ETH, LTC as base currencies: " "ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" in captured.out) # active markets, base=LTC args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 4 active markets with LTC as base currency: " "LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" in captured.out) # active markets, quote=USDT, USD args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--quote", "USDT", "USD", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 5 active markets with USDT, USD as quote currencies: " "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, LTC/USD, XLTCUSDT.\n" in captured.out) # active markets, quote=USDT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--quote", "USDT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 4 active markets with USDT as quote currency: " "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, XLTCUSDT.\n" in captured.out) # active markets, base=LTC, quote=USDT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "USDT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active market with LTC as base currency and " "with USDT as quote currency: XLTCUSDT.\n" in captured.out) # active pairs, base=LTC, quote=USDT args = [ "list-pairs", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "USD", "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active pair with LTC as base currency and " "with USD as quote currency: LTC/USD.\n" in captured.out) # active markets, base=LTC, quote=USDT, NONEXISTENT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "USDT", "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active market with LTC as base currency and " "with USDT, NONEXISTENT as quote currencies: XLTCUSDT.\n" in captured.out) # active markets, base=LTC, quote=NONEXISTENT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test tabular output args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 12 active markets:\n" in captured.out) # Test tabular output, no markets found args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', 
"--base", "LTC", "--quote", "NONEXISTENT", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test --print-json args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-json" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ('["ADA/USDT:USDT","BLK/BTC","ETH/BTC","ETH/USDT","ETH/USDT:USDT",' '"LTC/BTC","LTC/ETH","LTC/USD","NEO/BTC","TKN/BTC","XLTCUSDT","XRP/BTC"]' in captured.out) # Test --print-csv args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-csv" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Id,Symbol,Base,Quote,Active,Spot,Margin,Future,Leverage" in captured.out) assert ("blkbtc,BLK/BTC,BLK,BTC,True,Spot" in captured.out) assert ("USD-LTC,LTC/USD,LTC,USD,True,Spot" in captured.out) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert re.search(r"^BLK/BTC$", captured.out, re.MULTILINE) assert re.search(r"^LTC/USD$", captured.out, re.MULTILINE) mocker.patch(f'{EXMS}.markets', PropertyMock(side_effect=ValueError)) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] with pytest.raises(OperationalException, match=r"Cannot get markets.*"): start_list_markets(get_args(args), False) def test_create_datadir_failed(caplog): args = [ "create-userdir", ] with pytest.raises(SystemExit): start_create_userdir(get_args(args)) assert log_has("`create-userdir` requires --userdir to be set.", caplog) def test_create_datadir(caplog, mocker): cud = mocker.patch("freqtrade.commands.deploy_commands.create_userdata_dir", MagicMock()) csf = mocker.patch("freqtrade.commands.deploy_commands.copy_sample_files", MagicMock()) args = [ "create-userdir", "--userdir", "/temp/freqtrade/test" ] start_create_userdir(get_args(args)) assert cud.call_count == 1 assert csf.call_count == 1 def test_start_new_strategy(mocker, caplog): wt_mock = mocker.patch.object(Path, "write_text", MagicMock()) mocker.patch.object(Path, "exists", MagicMock(return_value=False)) args = [ "new-strategy", "--strategy", "CoolNewStrategy" ]
start_new_strategy(get_args(args))
6
2023-10-21 10:02:05+00:00
16k
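The tests in the record above capture CLI stdout with pytest's capsys fixture and assert one-entry-per-line output with ^...$ anchors under re.MULTILINE. A minimal, self-contained sketch of that pattern follows; emit_pairs is a hypothetical stand-in for the command under test and is not part of the record.

import re

def emit_pairs():
    print("BLK/BTC")
    print("LTC/USD")

def test_one_column_output(capsys):
    emit_pairs()
    captured = capsys.readouterr()
    # Under re.MULTILINE, ^ and $ match at each line of the captured stdout,
    # so every pair must appear on a line of its own.
    assert re.search(r"^BLK/BTC$", captured.out, re.MULTILINE)
    assert re.search(r"^LTC/USD$", captured.out, re.MULTILINE)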
generative-skill-chaining/gsc-code
generative_skill_chaining/envs/pybullet/table/predicates.py
[ { "identifier": "primitive_actions", "path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py", "snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANG...
import dataclasses import random import numpy as np import pybullet as p import symbolic from typing import Optional, Dict, List, Sequence, Tuple, Type from ctrlutils import eigen from shapely.geometry import Polygon, LineString from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack from generative_skill_chaining.envs.pybullet.sim import math from generative_skill_chaining.envs.pybullet.sim.robot import Robot
11,347
return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["obstruction_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class BeyondWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True distance = float(np.linalg.norm(obj.pose().pos[:2])) if not utils.is_beyondworkspace(obj=obj, distance=distance): return False dbprint(f"{self}.value():", False, "- distance:", distance) return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds r = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min[0] = r * np.cos(np.arcsin(0.5 * (xy_max[1] - xy_min[1]) / r)) xy_min += margin xy_max -= margin return bounds, margin class InOodZone(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = bounds[0, 0] xy_max[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin class Inhand(Predicate): MAX_GRASP_ATTEMPTS = 1 def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, )
dbprint = lambda *args: None # noqa # dbprint = print @dataclasses.dataclass class Predicate: args: List[str] @classmethod def create(cls, proposition: str) -> "Predicate": predicate, args = symbolic.parse_proposition(proposition) predicate_classes = { name.lower(): predicate_class for name, predicate_class in globals().items() } predicate_class = predicate_classes[predicate] return predicate_class(args) def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Generates a geometric grounding of a predicate.""" return True def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Evaluates to True if the geometrically grounded predicate is satisfied.""" return True def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]: return [objects[arg] for arg in self.args] def __str__(self) -> str: return f"{type(self).__name__.lower()}({', '.join(self.args)})" def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other) -> bool: return str(self) == str(other) class HandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the tail end on a hook object.""" pass class UpperHandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the head on a hook object.""" pass class Free(Predicate): """Unary predicate enforcing that no top-down occlusions exist on the object.""" DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = { (Box, Box): 0.05, (Box, Hook): 0.05, (Box, Rack): 0.1, (Hook, Rack): 0.1, } def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: child_obj = self.get_arg_objects(objects)[0] if child_obj.isinstance(Null): return True for obj in objects.values(): if f"inhand({obj})" in state or obj.isinstance(Null) or obj == child_obj: continue if utils.is_under(child_obj, obj): dbprint(f"{self}.value():", False, f"{child_obj} under {obj}") return False obj_a, obj_b = sorted( (child_obj.type(), obj.type()), key=lambda x: x.__name__ ) try: min_distance = Free.DISTANCE_MIN[(obj_a, obj_b)] except KeyError: continue if ( (obj.isinstance(Rack) and f"beyondworkspace({obj})" in state) or f"infront({child_obj}, rack)" in state or f"infront({obj}, rack)" in state ): min_distance = 0.04 if utils.is_within_distance( child_obj, obj, min_distance, obj.physics_id ) and not utils.is_above(child_obj, obj): dbprint( f"{self}.value():", False, f"{child_obj} and {obj} are within min distance", ) return False return True class Tippable(Predicate): """Unary predicate admitting non-upright configurations of an object.""" pass class TableBounds: """Predicate that specifies minimum and maximum x-y bounds on the table.""" MARGIN_SCALE: Dict[Type[Object], float] = {Hook: 0.25} def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds on the table as well as the modified margins.""" assert parent_obj.name == "table" zone = type(self).__name__.lower() poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: pos_bounds = poslimit.bounds(child_obj) zone = random.choice(list(pos_bounds.keys())) # Compute poslimit zone-specific angle if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) return pos_bounds[zone], margin elif 
f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin @staticmethod def get_poslimit( obj: Object, state: Sequence[Predicate], ) -> Optional["PosLimit"]: try: idx_prop = state.index(f"poslimit({obj})") except ValueError: return None prop = state[idx_prop] assert isinstance(prop, PosLimit) return prop @classmethod def get_zone( cls, obj: Object, state: Sequence[Predicate], ) -> Optional["TableBounds"]: zones = [ prop for prop in state if isinstance(prop, TableBounds) and prop.args[0] == obj ] if not zones and f"on({obj}, table)" in state: return cls() elif len(zones) == 1: return zones[0] elif len(zones) != 1: raise ValueError(f"{obj} cannot be in multiple zones: {zones}") return None @staticmethod def scale_margin(obj: Object, margins: np.ndarray) -> np.ndarray: try: bounds = TableBounds.MARGIN_SCALE[obj.type()] except KeyError: return margins return bounds * margins class Aligned(Predicate): """Unary predicate enforcing that the object and world coordinate frames align.""" ANGLE_EPS: float = 0.002 ANGLE_STD: float = 0.05 ANGLE_ABS: float = 0.1 ZONE_ANGLES: Dict[Tuple[Type[Object], Optional[str]], float] = { (Rack, "inworkspace"): 0.5 * np.pi, (Rack, "beyondworkspace"): 0.0, } # def value( # self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] # ) -> bool: # obj = self.get_arg_objects(objects)[0] # if obj.isinstance(Null): # return True # try: # zone = TableBounds.get_zone(obj=obj, state=state) # angle_mean = Aligned.ZONE_ANGLES[(obj.type(), type(zone).__name__.lower())] # if ( # angle_mean - Aligned.ANGLE_ABS < -np.pi # or angle_mean + Aligned.ANGLE_ABS > np.pi # ): # raise ValueError("Cannot recover wrapped angle.") # except KeyError: # angle_mean = 0.0 # angle = eigen.AngleAxisd(eigen.Quaterniond(obj.pose().quat)).angle - angle_mean # if not ( # Aligned.ANGLE_EPS <= abs(angle) <= Aligned.ANGLE_ABS # and utils.is_upright(obj) # ): # dbprint(f"{self}.value():", False) # return False # return True @staticmethod def sample_angle(obj: Object, zone: Optional[str] = None) -> float: angle = 0.0 while abs(angle) < Aligned.ANGLE_EPS: angle = np.random.randn() * Aligned.ANGLE_STD try: angle_mu = Aligned.ZONE_ANGLES[(obj.type(), zone)] except KeyError: angle_mu = 0.0 angle = np.clip( angle + angle_mu, angle_mu - Aligned.ANGLE_ABS, angle_mu + Aligned.ANGLE_ABS, ) angle = (angle + np.pi) % (2 * np.pi) - np.pi return angle class PosLimit(Predicate): """Unary predicate limiting the placement positions of particular object types.""" POS_EPS: Dict[Type[Object], float] = {Rack: 0.01} POS_SPEC: Dict[Type[Object], Dict[str, np.ndarray]] = { Rack: { "inworkspace": np.array([0.44, -0.33]), "beyondworkspace": np.array([0.82, 0.00]), } } def bounds(self, child_obj: Object) -> Dict[str, np.ndarray]: assert child_obj.name == self.args[0] if child_obj.type() not in PosLimit.POS_SPEC: raise ValueError(f"Positions not specified for {child_obj.type()}") eps = PosLimit.POS_EPS[child_obj.type()] xys = PosLimit.POS_SPEC[child_obj.type()] bounds = {k: np.array([xy - eps, xy + eps]) for k, xy in xys.items()} return bounds class InWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: 
Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance((Null, Rack)): # Rack is in workspace by construction. return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not utils.is_inworkspace(obj_pos=obj_pos, distance=distance): dbprint( f"{self}.value():", False, "- pos:", obj_pos[:2], "distance:", distance ) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds inside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class InCollisionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the collision zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["workspace_x_min"] <= obj.pose().pos[0] < utils.TABLE_CONSTRAINTS["operational_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_min += margin xy_max -= margin return bounds, margin class InOperationalZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the operational zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["operational_x_min"] <= obj_pos[0] < utils.TABLE_CONSTRAINTS["operational_x_max"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_max"] xy_min += margin xy_max -= margin 
return bounds, margin class InObstructionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the obstruction zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( obj_pos[0] >= utils.TABLE_CONSTRAINTS["obstruction_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["obstruction_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class BeyondWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True distance = float(np.linalg.norm(obj.pose().pos[:2])) if not utils.is_beyondworkspace(obj=obj, distance=distance): return False dbprint(f"{self}.value():", False, "- distance:", distance) return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds r = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min[0] = r * np.cos(np.arcsin(0.5 * (xy_max[1] - xy_min[1]) / r)) xy_min += margin xy_max -= margin return bounds, margin class InOodZone(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = bounds[0, 0] xy_max[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin class Inhand(Predicate): MAX_GRASP_ATTEMPTS = 1 def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if 
obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, )
obj_pose = math.Pose.from_eigen(grasp_pose.to_eigen().inverse())
7
2023-10-16 00:22:40+00:00
16k
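Predicate.create in the record above maps a proposition string such as "free(box)" to a predicate instance by parsing it with symbolic.parse_proposition and looking up the lower-cased predicate name among the module's classes. Below is a minimal sketch of that dispatch under two stated assumptions: a naive "name(arg1, arg2)" split stands in for the real parser, and a subclass registry stands in for the globals() scan.

import dataclasses
from typing import List

@dataclasses.dataclass
class Predicate:
    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        name, _, rest = proposition.partition("(")
        args = [a.strip() for a in rest.rstrip(")").split(",") if a.strip()]
        # Lower-cased class names act as the predicate vocabulary.
        registry = {sub.__name__.lower(): sub for sub in cls.__subclasses__()}
        return registry[name.lower()](args)

class Free(Predicate):
    pass

print(Predicate.create("free(box)"))  # Free(args=['box'])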
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/numpy/array_api/linalg.py
[ { "identifier": "_floating_dtypes", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_dtypes.py", "snippet": "def _result_type(type1, type2):" }, { "identifier": "reshape", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_manipulation_functions.py", "...
from ._dtypes import ( _floating_dtypes, _numeric_dtypes, float32, float64, complex64, complex128 ) from ._manipulation_functions import reshape from ._array_object import Array from ..core.numeric import normalize_axis_tuple from typing import TYPE_CHECKING from ._typing import Literal, Optional, Sequence, Tuple, Union, Dtype from typing import NamedTuple from ..linalg.linalg import (_makearray, _assert_stacked_2d, _assert_stacked_square, _commonType, isComplexType, get_linalg_error_extobj, _raise_linalgerror_singular) from ..linalg import _umath_linalg import numpy.linalg import numpy as np
12,080
from __future__ import annotations if TYPE_CHECKING: class EighResult(NamedTuple): eigenvalues: Array eigenvectors: Array class QRResult(NamedTuple): Q: Array R: Array class SlogdetResult(NamedTuple): sign: Array logabsdet: Array class SVDResult(NamedTuple): U: Array S: Array Vh: Array # Note: the inclusion of the upper keyword is different from # np.linalg.cholesky, which does not have it. def cholesky(x: Array, /, *, upper: bool = False) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.cholesky. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in cholesky') L = np.linalg.cholesky(x._array) if upper: return Array._new(L).mT return Array._new(L) # Note: cross is the numpy top-level namespace, not np.linalg def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array: """ Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`. See its docstring for more information. """
from __future__ import annotations if TYPE_CHECKING: class EighResult(NamedTuple): eigenvalues: Array eigenvectors: Array class QRResult(NamedTuple): Q: Array R: Array class SlogdetResult(NamedTuple): sign: Array logabsdet: Array class SVDResult(NamedTuple): U: Array S: Array Vh: Array # Note: the inclusion of the upper keyword is different from # np.linalg.cholesky, which does not have it. def cholesky(x: Array, /, *, upper: bool = False) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.cholesky. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in cholesky') L = np.linalg.cholesky(x._array) if upper: return Array._new(L).mT return Array._new(L) # Note: cross is the numpy top-level namespace, not np.linalg def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array: """ Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`. See its docstring for more information. """
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
0
2023-10-23 18:09:28+00:00
16k
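The upper=True branch of the cholesky wrapper above returns the matrix transpose (.mT) of the lower factor rather than refactorizing. For real dtypes that transpose is exactly the upper Cholesky factor, as this plain-numpy sketch checks; complex inputs would additionally need a conjugate.

import numpy as np

A = np.array([[4.0, 2.0], [2.0, 3.0]])  # symmetric positive definite
L = np.linalg.cholesky(A)               # lower triangular, A == L @ L.T
U = L.T                                 # what the upper=True branch returns via .mT
assert np.allclose(L @ L.T, A)
assert np.allclose(U.T @ U, A)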
zju3dv/nr_in_a_room
test/test_light_adaptation.py
[ { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n ...
import sys import os import torch import numpy as np from PIL import Image from omegaconf import OmegaConf from optim.room_optimizer import RoomOptimizer from optim.misc_utils import ( read_real_scene_localization, read_real_scene_localization_with_name, read_testing_config, ) from utils.util import read_json
14,166
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): # active_instance_id = config.active_instance_id scene_info_json_path = config.scene_info_json active_instance_id = []
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): # active_instance_id = config.active_instance_id scene_info_json_path = config.scene_info_json active_instance_id = []
for obj_info in read_json(scene_info_json_path)["objs"]:
4
2023-10-15 08:41:29+00:00
16k
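The record above sets OMP_NUM_THREADS and MKL_NUM_THREADS at the very top of the module, before the heavy imports. The ordering matters: BLAS backends typically read these variables once, when the library first loads, so setting them after numpy (or torch) has been imported usually has no effect. A minimal sketch of the pattern, whose exact effect depends on the installed BLAS backend:

import os
os.environ["OMP_NUM_THREADS"] = "1"  # must precede the first numpy import
os.environ["MKL_NUM_THREADS"] = "1"

import numpy as np  # BLAS now initializes with a single thread
print(np.ones((2, 2)) @ np.ones((2, 2)))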
WenzhengZhang/Seq2seqCoref
trainer.py
[ { "identifier": "CorefAllMetrics", "path": "metrics.py", "snippet": "class CorefAllMetrics(object):\n \"\"\"\n Wrapper for coreference resolution metrics.\n \"\"\"\n\n @staticmethod\n def _get_mention_to_x(clusters: List[list]) -> dict:\n mention_to_x = {}\n for cluster in c...
import time import torch.distributed as dist import sys import numpy as np import os import json import re import torch.nn as nn import torch import shutil import math import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl import smdistributed.modelparallel.torch as smp import safetensors.torch from tqdm.auto import tqdm from transformers.trainer_utils import HPSearchBackend, speed_metrics, \ TrainOutput from pathlib import Path from torch.utils.data import RandomSampler from torch.utils.data.distributed import DistributedSampler from transformers.trainer_callback import TrainerState from transformers.trainer import TRAINER_STATE_NAME, OptimizerNames from transformers.utils import is_apex_available from transformers.integrations import hp_params from transformers import Seq2SeqTrainer from packaging import version from collections import defaultdict from metrics import CorefAllMetrics from typing import Dict, Union, Any, Optional, Tuple, List from transformers.debug_utils import DebugOption, DebugUnderflowOverflow from transformers.pytorch_utils import is_torch_less_than_1_11 from torch.utils.data import DataLoader from transformers.trainer_utils import EvalLoopOutput, has_length, \ denumpify_detensorize, ShardedDDPOption from data import get_document_predicts, parse_int_output_tokens, \ parse_short_target_tokens, parse_nonint_output_tokens from constants import SPECIAL_IDS, MARK_SPECIAL_IDS, NON_INT_SPECIAL_IDS, \ MENTION_END_NON_INT_SPECIAL_IDS from transformers.deepspeed import deepspeed_init from transformers.trainer_pt_utils import find_batch_size, nested_concat, \ nested_numpify, IterableDatasetShard, nested_truncate, get_parameter_names from transformers.modeling_utils import PreTrainedModel, unwrap_model, \ load_sharded_checkpoint from transformers.utils import logging, is_torch_tpu_available, \ is_sagemaker_mp_enabled, is_safetensors_available, SAFE_WEIGHTS_NAME, \ WEIGHTS_NAME, WEIGHTS_INDEX_NAME from transformers.integrations import is_fairscale_available from transformers.dependency_versions_check import dep_version_check from smdistributed.modelparallel import __version__ as SMP_VERSION from apex import amp from transformers import LogitsProcessorList from logits_processor import ShortSeqProcessor, IntProcessor, NonIntProcessor from transformers.trainer_seq2seq import is_deepspeed_zero3_enabled
10,954
self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS pred_data, pred_token_mentions, predict_ids = \ parse_nonint_output_tokens( input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.add_mention_end, thred) pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in pred_token_mentions] pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = {'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'predict_clusters': pred_data, 'gold_clusters': gold_data, 'predict_token_mentions': pred_token_mentions } # list of (m1,m2) documents_to_chunk_data[doc_id].extend(pred_data) documents_to_chunk_gold[doc_id].extend(gold_data) out_sents.append(out_predict) if doc_id != last_doc_id:
if is_torch_tpu_available(check_device=False): if is_fairscale_available(): dep_version_check("fairscale") if is_sagemaker_mp_enabled(): IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse( "1.10") else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): if is_apex_available(): logger = logging.get_logger(__name__) TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" class CorefTrainer(Seq2SeqTrainer): def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if self.args.val_after_train and self.args.eval_delay < \ self.state.global_step: for checkpoint in checkpoints_sorted[:-1]: states_dir = [str(x) for x in Path( checkpoint).glob(f'global_step*') if os.path.isdir(x)] for state_dir in states_dir: logger.info(f"Deleting optimizer states of saved " f"checkpoint {checkpoint}") if os.path.exists(state_dir) and os.path.isdir( state_dir): shutil.rmtree(state_dir) else: if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[ -1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len( checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[ :number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel) and not hasattr( self.model, 'save_pretrained'): if state_dict is None: state_dict = self.model.state_dict() if isinstance(unwrap_model(self.model), PreTrainedModel): unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") # if self.args.save_safetensors: # safetensors.torch.save_file(state_dict, # os.path.join(output_dir, # SAFE_WEIGHTS_NAME)) # else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def _inner_training_loop( self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self._train_batch_size = batch_size # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = args.max_steps * total_train_batch_size else: max_steps = math.ceil( args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples( train_dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size max_steps = args.max_steps # Setting a very large number of epochs so we go as many times as necessary over the iterator. num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: # nn.DataParallel(model) replicates the model, creating new variables and module # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torch.distributed.launch)." 
) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa delay_optimizer_creation = ( self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE or is_sagemaker_mp_enabled() or self.fsdp is not None ) if args.deepspeed: deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine self.optimizer = optimizer self.lr_scheduler = lr_scheduler elif not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Activate gradient checkpointing if needed if args.gradient_checkpointing: self.model.gradient_checkpointing_enable() model = self._wrap_model(self.model_wrapped) if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None: self._load_from_checkpoint(resume_from_checkpoint, model) # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model if delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Num Epochs = {num_train_epochs}") logger.info( f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps}") logger.info( f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}" ) self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % ( num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info( " Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info( f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` " "flag to your launch command, but you will resume the training on data already seen by your model." 
) if self.is_local_process_zero() and not args.disable_tqdm: steps_trained_progress_bar = tqdm( total=steps_trained_in_current_epoch) steps_trained_progress_bar.set_description( "Skipping the first batches") # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: # use self._trial because the SigOpt/Optuna hpo only calls `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() # tr_loss is a tensor to avoid synchronization of TPUs through .item() tr_loss = torch.tensor(0.0).to(args.device) # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. if not args.ignore_data_skip: for epoch in range(epochs_trained): is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( train_dataloader.sampler, RandomSampler ) if is_torch_less_than_1_11 or not is_random_sampler: # We just need to begin an iteration to create the randomization of the sampler. # That was before PyTorch 1.11 however... if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) for _ in train_dataloader: break else: # Otherwise we need to call the whooooole sampler cause there is some random operation added # AT THE VERY END! _ = list(train_dataloader.sampler) if args.manual_empty_cache: torch.cuda.empty_cache() for epoch in range(epochs_trained, num_train_epochs): if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) if isinstance(train_dataloader, DataLoader) and isinstance( train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) elif hasattr(train_dataloader, "dataset") and isinstance( train_dataloader.dataset, IterableDatasetShard): train_dataloader.dataset.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [ args.device]).per_device_loader(args.device) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) step = -1 if args.manual_empty_cache: torch.cuda.empty_cache() for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if args.manual_empty_cache: torch.cuda.empty_cache() if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # if args.manual_empty_cache: # torch.cuda.empty_cache() if ( ((step + 1) % args.gradient_accumulation_steps != 0) and args.local_rank != -1 and args._no_sync_in_gradient_accumulation ): # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. with model.no_sync(): tr_loss_step = self.training_step(model, inputs) else: tr_loss_step = self.training_step(model, inputs) if ( args.logging_nan_inf_filter and not is_torch_tpu_available() and ( torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss += tr_loss / ( 1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps if self.deepspeed: if args.manual_empty_cache: torch.cuda.empty_cache() self.deepspeed.step() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: # deepspeed does its own clipping if self.do_grad_scaling: # Reduce gradients first for XLA if is_torch_tpu_available(): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) # AMP: gradients need unscaling self.scaler.unscale_(self.optimizer) if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) elif hasattr(self.optimizer, "clip_grad_norm"): # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping self.optimizer.clip_grad_norm(args.max_grad_norm) elif hasattr(model, "clip_grad_norm_"): # Some models (like FullyShardedDDP) have a specific way to do gradient clipping model.clip_grad_norm_(args.max_grad_norm) else: # Revert to normal clipping otherwise, handling Apex or full precision nn.utils.clip_grad_norm_( amp.master_params( self.optimizer) if self.use_apex else model.parameters(), args.max_grad_norm, ) # Optimizer step optimizer_was_run = True if self.deepspeed: pass # called outside the loop elif is_torch_tpu_available(): if self.do_grad_scaling: 
self.scaler.step(self.optimizer) self.scaler.update() else: xm.optimizer_step(self.optimizer) elif self.do_grad_scaling: scale_before = self.scaler.get_scale() self.scaler.step(self.optimizer) self.scaler.update() scale_after = self.scaler.get_scale() optimizer_was_run = scale_before <= scale_after else: self.optimizer.step() if optimizer_was_run and not self.deepspeed: self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch if args.manual_empty_cache: torch.cuda.empty_cache() self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning( "There seems to be not a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info( "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sure the model has been saved by process 0. if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.local_rank != -1: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. 
if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS pred_data, pred_token_mentions, predict_ids = \ parse_nonint_output_tokens( input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.add_mention_end, thred) pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in pred_token_mentions] pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = {'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'predict_clusters': pred_data, 'gold_clusters': gold_data, 'predict_token_mentions': pred_token_mentions } # list of (m1,m2) documents_to_chunk_data[doc_id].extend(pred_data) documents_to_chunk_gold[doc_id].extend(gold_data) out_sents.append(out_predict) if doc_id != last_doc_id:
predictions[last_doc_id] = get_document_predicts(
1
2023-10-17 17:39:16+00:00
16k
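The `_inner_training_loop` in the row above interleaves gradient accumulation, AMP grad scaling, and gradient clipping in a specific order: unscale before clipping, clip before the scaler-mediated step. A minimal, self-contained sketch of that ordering follows; the toy model, optimizer, learning rate, and synthetic data are hypothetical stand-ins for the trainer's real dataloader and arguments, and the explicit loss division replaces the trainer's internal scaling.

import torch
from torch import nn
from torch.cuda.amp import GradScaler

# Toy stand-ins (hypothetical) for the trainer's model, optimizer, and data.
model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler(enabled=torch.cuda.is_available())  # acts as a pass-through on CPU

gradient_accumulation_steps = 4
max_grad_norm = 1.0
data = [(torch.randn(2, 8), torch.randn(2, 1)) for _ in range(8)]

model.zero_grad()
for step, (x, y) in enumerate(data):
    loss = nn.functional.mse_loss(model(x), y)
    # Scale down so the accumulated gradient matches one large-batch gradient.
    scaler.scale(loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        # Order matters: unscale before clipping, clip before stepping.
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        scaler.step(optimizer)  # skipped internally if inf/nan gradients were found
        scaler.update()
        model.zero_grad()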
chenxn2020/GOSE
GOSEfinetune/models/LiLTRobertaLike/modeling_LiLTRobertaLike.py
[ { "identifier": "LiLTRobertaLikeConfig", "path": "GOSEfinetune/models/LiLTRobertaLike/configuration_LiLTRobertaLike.py", "snippet": "class LiLTRobertaLikeConfig(RobertaConfig):\n model_type = \"liltrobertalike\"\n\n def __init__(\n self,\n channel_shrink_ratio=4,\n max_2d_posi...
import math import torch import torch.nn as nn import torch.utils.checkpoint import os from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN, gelu from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from .configuration_LiLTRobertaLike import LiLTRobertaLikeConfig from dataclasses import dataclass from typing import Dict, Optional, Tuple from transformers.file_utils import ModelOutput from ...modules.decoders.RE import RE from ...modules.decoders.gose import GOSE from ...utils import ReOutput
12,301
output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ), layout_encoder_outputs class LiLTRobertaLikeForTokenClassification(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size + config.hidden_size//config.channel_shrink_ratio, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LiLTRobertaLikeForRelationExtraction(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.input_type = config.input_type self.freeze_model = config.freeze_model print(f'=============model freeze {self.freeze_model}===============') #from IPython import embed;embed() self.decoder = config.decoder_name if self.decoder == 're': self.extractor = REDecoder(config, config.hidden_size + config.hidden_size // config.channel_shrink_ratio) elif self.decoder == 
'gose':
# coding=utf-8 logger = logging.get_logger(__name__) class LiLTRobertaLikeTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings, position_ids def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LiLTRobertaLikeLayoutEmbeddings(nn.Module): def __init__(self, config): super(LiLTRobertaLikeLayoutEmbeddings, self).__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.padding_idx = config.pad_token_id self.box_position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size//config.channel_shrink_ratio, padding_idx=self.padding_idx ) self.box_linear_embeddings = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size//config.channel_shrink_ratio) self.LayerNorm = nn.LayerNorm(config.hidden_size//config.channel_shrink_ratio, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, bbox=None, position_ids=None, ): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings) box_position_embeddings = self.box_position_embeddings(position_ids) spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings) spatial_position_embeddings = self.dropout(spatial_position_embeddings) return spatial_position_embeddings class LiLTRobertaLikeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.layout_query = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.layout_key = nn.Linear(config.hidden_size // config.channel_shrink_ratio, 
self.all_head_size // config.channel_shrink_ratio) self.layout_value = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.channel_shrink_ratio = config.channel_shrink_ratio def transpose_for_scores(self, x, r=1): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size//r) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio) layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio) layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio) mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size) tmp_layout_attention_scores = layout_attention_scores / math.sqrt(self.attention_head_size//self.channel_shrink_ratio) attention_scores = tmp_attention_scores + tmp_layout_attention_scores layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores if attention_mask is not None: # Apply the attention mask (precomputed for all layers in BertModel forward() function) layout_attention_scores = layout_attention_scores + attention_mask # Normalize the attention scores to probabilities. layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. layout_attention_probs = self.dropout(layout_attention_probs) # Mask heads if we want to if head_mask is not None: layout_attention_probs = layout_attention_probs * head_mask layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer) layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size//self.channel_shrink_ratio,) layout_context_layer = layout_context_layer.view(*new_context_layer_shape) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = ((context_layer, layout_context_layer), attention_probs) if output_attentions else ((context_layer, layout_context_layer),) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class LiLTRobertaLikeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LiLTRobertaLikeSelfAttention(config) self.output = LiLTRobertaLikeSelfOutput(config) self.pruned_heads = set() ori_hidden_size = config.hidden_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio self.layout_output = LiLTRobertaLikeSelfOutput(config) config.hidden_size = ori_hidden_size def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, layout_inputs, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0][0], hidden_states) layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs) outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them return outputs class LiLTRobertaLikeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class LiLTRobertaLikeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LiLTRobertaLikeAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LiLTRobertaLikeAttention(config) self.intermediate = LiLTRobertaLikeIntermediate(config) self.output = LiLTRobertaLikeOutput(config) ori_hidden_size = config.hidden_size ori_intermediate_size = config.intermediate_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio self.layout_intermediate = LiLTRobertaLikeIntermediate(config) self.layout_output = LiLTRobertaLikeOutput(config) config.hidden_size = ori_hidden_size config.intermediate_size = ori_intermediate_size def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, layout_inputs, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0][0] layout_attention_output = self_attention_outputs[0][1] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layout_layer_output = apply_chunking_to_forward( self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output ) outputs = ((layer_output, layout_layer_output),) + 
outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def layout_feed_forward_chunk(self, attention_output): intermediate_output = self.layout_intermediate(attention_output) layer_output = self.layout_output(intermediate_output, attention_output) return layer_output class LiLTRobertaLikeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LiLTRobertaLikeConfig base_model_prefix = "liltrobertalike" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class LiLTRobertaLikeModel(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = LiLTRobertaLikeTextEmbeddings(config) self.layout_embeddings = LiLTRobertaLikeLayoutEmbeddings(config) self.encoder = LiLTRobertaLikeEncoder(config) self.pooler = LiLTRobertaLikePooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, 
device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output, position_ids = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) layout_embedding_output = self.layout_embeddings( bbox=bbox, position_ids=position_ids, ) encoder_outputs, layout_encoder_outputs = self.encoder( embedding_output, layout_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ), layout_encoder_outputs class LiLTRobertaLikeForTokenClassification(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size + config.hidden_size//config.channel_shrink_ratio, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LiLTRobertaLikeForRelationExtraction(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.input_type = config.input_type self.freeze_model = config.freeze_model print(f'=============model freeze {self.freeze_model}===============') #from IPython import embed;embed() self.decoder = config.decoder_name if self.decoder == 're': self.extractor = REDecoder(config, config.hidden_size + config.hidden_size // config.channel_shrink_ratio) elif self.decoder == 'gose':
self.extractor = GOSE(config)
2
2023-10-19 14:36:32+00:00
16k
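In the LiLT-style model code above, the text and layout attention streams are coupled by summing their scaled score matrices before softmax (`tmp_attention_scores + tmp_layout_attention_scores`, and the mirror image for the layout stream). A stripped-down sketch of just that score-fusion step follows; the tensor shapes and random inputs are hypothetical stand-ins for the model's real query/key projections.

import math
import torch

batch, heads, seq = 2, 4, 16
d_text, d_layout = 32, 8  # layout head dim is shrunk by channel_shrink_ratio

q_t = torch.randn(batch, heads, seq, d_text)
k_t = torch.randn(batch, heads, seq, d_text)
q_l = torch.randn(batch, heads, seq, d_layout)
k_l = torch.randn(batch, heads, seq, d_layout)

# Per-stream scaled dot-product attention scores.
text_scores = q_t @ k_t.transpose(-1, -2) / math.sqrt(d_text)
layout_scores = q_l @ k_l.transpose(-1, -2) / math.sqrt(d_layout)

# Cross-modal coupling: each stream's final scores are the sum of both,
# so the text and layout attention distributions inform one another.
text_probs = torch.softmax(text_scores + layout_scores, dim=-1)
layout_probs = torch.softmax(layout_scores + text_scores, dim=-1)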
BurgerBurgerBurger/AA
run.py
[ { "identifier": "add_args", "path": "args.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n...
import argparse import os import numpy as np import torch import ujson as json import pandas as pd import pickle from torch.cuda.amp import GradScaler from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from args import add_args from model import DocREModel from utils import set_seed, collate_fn, create_directory from prepro import read_docred from evaluation import to_official, official_evaluate, merge_results from tqdm import tqdm
10,826
for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, 
scores=scores, topks=topks) if len(official_results) > 0: if tag == "test":
def load_input(batch, device, tag="dev"): input = {'input_ids': batch[0].to(device), 'attention_mask': batch[1].to(device), 'labels': batch[2].to(device), 'entity_pos': batch[3], 'hts': batch[4], 'sent_pos': batch[5], 'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None, 'teacher_attns': batch[7].to(device) if not batch[7] is None else None, 'graph': batch[8], 'tag': tag } return input def train(args, model, train_features, dev_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) scaler = GradScaler() print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = 
DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test":
best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file,
7
2023-10-20 05:53:25+00:00
16k
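The record above (a document-level relation-extraction trainer) combines mixed precision via `GradScaler`, gradient accumulation, and a linear warmup schedule. Below is a minimal, self-contained sketch of that training pattern; `model`, `loader`, and every hyperparameter are hypothetical stand-ins, and the warmup/decay shape is reimplemented locally with `LambdaLR` rather than imported from `transformers`.

```python
# Sketch only: AMP + gradient accumulation + linear warmup, as in the loop above.
import torch
from torch.cuda.amp import GradScaler, autocast

def make_warmup_scheduler(optimizer, warmup_steps, total_steps):
    # Linear warmup to the base LR, then linear decay to zero -- the same shape
    # that transformers.get_linear_schedule_with_warmup produces.
    def lr_lambda(step):
        if step < warmup_steps:
            return step / max(1, warmup_steps)
        return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

def train_epoch(model, loader, optimizer, scheduler, accum_steps=2, max_grad_norm=1.0):
    scaler = GradScaler()
    optimizer.zero_grad()
    for step, (x, y) in enumerate(loader):
        with autocast():
            loss = model(x, y) / accum_steps   # assume model returns a scalar loss
        scaler.scale(loss).backward()
        if (step + 1) % accum_steps == 0:
            scaler.unscale_(optimizer)          # unscale before gradient clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            scaler.step(optimizer)              # skips the step on inf/nan grads
            scaler.update()
            scheduler.step()
            optimizer.zero_grad()
```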
xingchenshanyao/YOLOP-E
lib/core/function.py
[ { "identifier": "ConfusionMatrix", "path": "lib/core/evaluate.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc=1, conf=0.25, iou_thres=0.45):\n nc = 10 # 20230904 nc是类别数\n self.matrix ...
import time import torch import numpy as np import json import random import cv2 import os import math import wandb from lib.core.evaluate import ConfusionMatrix,SegmentationMetric from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target from lib.utils.utils import time_synchronized from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result from threading import Thread from PIL import Image from torchvision import transforms from pathlib import Path from torch.cuda import amp from tqdm import tqdm from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
11,793
img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det): det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round() for *xyxy,conf,cls in reversed(det): #print(cls) # import pdb;pdb.set_trace() label_det_pred = f'{names[int(cls)]} {conf:.3f}' plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det) labels = target[0][target[0][:, 0] == i, 1:] # print(labels) labels[:,1:5]=xywh2xyxy(labels[:,1:5]) if len(labels): labels[:,1:5]=scale_coords(img[i].shape[1:],labels[:,1:5],img_gt.shape).round() for cls,x1,y1,x2,y2 in labels: # print(names) # print(cls) label_det_gt = f'{names[int(cls)]}' xyxy = (x1,y1,x2,y2) plot_one_box(xyxy, img_gt , label=label_det_gt, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_gt.png".format(epoch,i),img_gt) # Statistics per image # output([xyxy,conf,cls]) # target[0] ([img_id,cls,xyxy]) for si, pred in enumerate(output): labels = target[0][target[0][:, 0] == si, 1:] #all object in one image nl = len(labels) # num of object tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if config.TEST.SAVE_TXT: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if config.TEST.PLOTS and len(wandb_images) < log_imgs: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if config.TEST.SAVE_JSON: # [{"image_id": 42, "category_id": 
18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if config.TEST.PLOTS: confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious # n*m n:pred m:label
id_dict_SDExpressway = { 0:'Car', 1:'Truck', 2:'Guidance Sign', 3:'Warning Sign', 4:'Pending Sign', 5:'Speed Limit Sign', 6:'Emergency Telephone Sign', 7:'Directional Sign', 8:'Straight Ahead Arrow', 9:'Straight or Right Turn Arrow'} def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup, writer_dict, logger, device, rank=-1): """ train for one epoch Inputs: - config: configurations - train_loader: loader for data - model: - criterion: (function) calculate all the loss, return total_loss, head_losses - writer_dict: outputs(2,) output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85] output[1] len:1, [2,256,256] output[2] len:1, [2,256,256] target(2,) target[0] [1,n,5] target[1] [2,256,256] target[2] [2,256,256] Returns: None """ batch_time = AverageMeter() # batch_time = <lib.core.function.AverageMeter object at 0x7f0255618970> data_time = AverageMeter() # data_time = <lib.core.function.AverageMeter object at 0x7f025561a4f0> losses = AverageMeter() # losses = <lib.core.function.AverageMeter object at 0x7f02402e7cd0> # switch to train mode model.train() start = time.time() # start = 1688805138.6791408 for i, (input, target, paths, shapes) in enumerate(train_loader): # i=0 # target = [tensor([[0.0000e+00,...335e-01]]), tensor([[[[1., 1., 1...., 0.]]]]), tensor([[[[1., 1., 1...., 0.]]]])] # paths = ('/home/xingchen/Study...3225df.jpg', '/home/xingchen/Study...49926c.jpg', ...) # shapes = (((720, 1280), ((0.5, 0.5), (0.0, 12.0))), ((...), (...)), ...) intermediate = time.time() # intermediate = 1688805496.5324085 #print('tims:{}'.format(intermediate-start)) num_iter = i + num_batch * (epoch - 1) # num_iter = 0 # num_batch = 4375 if num_iter < num_warmup: # warm up lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \ (1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine xi = [0, num_warmup] # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM]) data_time.update(time.time() - start) if not cfg.DEBUG: input = input.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target with amp.autocast(enabled=device.type != 'cpu'): outputs = model(input) # outputs = [[tensor([[[[[ 8.8806e...ackward0>), tensor([[[[[ 4.6631e...ackward0>), tensor([[[[[ 1.4758e...ackward0>)], tensor([[[[0.5151, 0...ackward0>), tensor([[[[0.4868, 0...ackward0>)] total_loss, head_losses = criterion(outputs, target, shapes,model) # print(head_losses) # compute gradient and do update step optimizer.zero_grad() scaler.scale(total_loss).backward() scaler.step(optimizer) scaler.update() if rank in [-1, 0]: # measure accuracy and record loss losses.update(total_loss.item(), input.size(0)) # _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(), # target.detach().cpu().numpy()) # acc.update(avg_acc, cnt) # measure elapsed time batch_time.update(time.time() - start) end = time.time() if i % cfg.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\t' \ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \ 'Speed {speed:.1f} samples/s\t' \ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \ 'Loss {loss.val:.5f} 
({loss.avg:.5f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) # writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1,nc = 1): """ validate Inputs: - config: configurations - train_loader: loader for data - model: - criterion: (function) calculate all the loss, return - writer_dict: Return: None """ # setting max_stride = 32 weights = None save_dir = output_dir + os.path.sep + 'visualization' # save_dir = 'runs/BddDataset/_2023-07-09-09-50/visualization' if not os.path.exists(save_dir): os.mkdir(save_dir) # print(save_dir) _, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS) # batch_size = 16 test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS) # test_batch_size = 16 training = False is_coco = False #is coco dataset save_conf=False # save auto-label confidences verbose=False save_hybrid=False log_imgs,wandb = min(16,100), None nc = 10 #20230904 iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for mAP@0.5:0.95 niou = iouv.numel() # niou = 10 try: import wandb except ImportError: wandb = None log_imgs = 0 seen = 0 # import pdb;pdb.set_trace() confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix ll_metric = SegmentationMetric(2) #segment confusion matrix # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} # names = {'0':0} names = id_dict_SDExpressway #20230904 colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # colors = [[191, 83, 111]] coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') # s = ' Class Images Targets P R mAP@.5 mAP@.5:.95' p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0. losses = AverageMeter() da_acc_seg = AverageMeter() da_IoU_seg = AverageMeter() da_mIoU_seg = AverageMeter() ll_acc_seg = AverageMeter() ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to eval mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # detect the image? 
t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # 
plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det): det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round() for *xyxy,conf,cls in reversed(det): #print(cls) # import pdb;pdb.set_trace() label_det_pred = f'{names[int(cls)]} {conf:.3f}' plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det) labels = target[0][target[0][:, 0] == i, 1:] # print(labels) labels[:,1:5]=xywh2xyxy(labels[:,1:5]) if len(labels): labels[:,1:5]=scale_coords(img[i].shape[1:],labels[:,1:5],img_gt.shape).round() for cls,x1,y1,x2,y2 in labels: # print(names) # print(cls) label_det_gt = f'{names[int(cls)]}' xyxy = (x1,y1,x2,y2) plot_one_box(xyxy, img_gt , label=label_det_gt, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_gt.png".format(epoch,i),img_gt) # Statistics per image # output([xyxy,conf,cls]) # target[0] ([img_id,cls,xyxy]) for si, pred in enumerate(output): labels = target[0][target[0][:, 0] == si, 1:] #all object in one image nl = len(labels) # num of object tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if config.TEST.SAVE_TXT: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if config.TEST.PLOTS and len(wandb_images) < log_imgs: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if config.TEST.SAVE_JSON: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if config.TEST.PLOTS: confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious # n*m n:pred m:label
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
7
2023-10-24 02:08:25+00:00
16k
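The validation loop in the YOLOP-E record above matches predictions to ground truth with `xywh2xyxy` and `box_iou` before accumulating per-class AP statistics. The sketch below gives self-contained versions of those two helpers in the common YOLOv5-style form; it is illustrative and may differ in detail from the repo's `lib/core/general.py`.

```python
# Sketch only: the two box-geometry helpers the statistics loop above relies on.
import torch

def xywh2xyxy(x: torch.Tensor) -> torch.Tensor:
    # [cx, cy, w, h] -> [x1, y1, x2, y2]
    y = x.clone()
    y[..., 0] = x[..., 0] - x[..., 2] / 2
    y[..., 1] = x[..., 1] - x[..., 3] / 2
    y[..., 2] = x[..., 0] + x[..., 2] / 2
    y[..., 3] = x[..., 1] + x[..., 3] / 2
    return y

def box_iou(box1: torch.Tensor, box2: torch.Tensor) -> torch.Tensor:
    # box1: (N, 4), box2: (M, 4), both xyxy; returns the (N, M) pairwise IoU matrix.
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    lt = torch.max(box1[:, None, :2], box2[None, :, :2])  # top-left of overlap
    rb = torch.min(box1[:, None, 2:], box2[None, :, 2:])  # bottom-right of overlap
    wh = (rb - lt).clamp(min=0)                           # zero when boxes are disjoint
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter)
```

This is the same matching primitive used in the record's `ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)` line: each prediction is paired with its best-overlapping target.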
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It...
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
12,975
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6))
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6))
state = TrainState.create(
14
2023-10-24 22:01:35+00:00
16k
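`initialize_run` in the record above scales data with `get_data_scaler`/`get_data_inverse_scaler` and counts parameters by flattening the Flax parameter tree. The sketch below shows plausible implementations of both; the scaler behavior (mapping [0, 1] data to [-1, 1] when `is_centered`) is an assumption inferred from how the helpers are called, not the repo's actual code.

```python
# Sketch only: assumed scaler pair and the parameter count from initialize_run.
import jax.numpy as jnp
from flax import traverse_util

def get_data_scaler(is_centered: bool):
    # Assumed: map inputs from [0, 1] to [-1, 1] when centering is requested.
    return (lambda x: x * 2.0 - 1.0) if is_centered else (lambda x: x)

def get_data_inverse_scaler(is_centered: bool):
    # Inverse of the above: map model outputs back to [0, 1].
    return (lambda x: (x + 1.0) / 2.0) if is_centered else (lambda x: x)

def count_params(params) -> int:
    # Flatten the nested parameter dict and sum the element counts, as the
    # trainer does before logging "Total number of parameters".
    flat = traverse_util.flatten_dict(params).values()
    return sum(jnp.size(p) for p in flat)
```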
KosinskiLab/pyTME
tme/tests/test_structure.py
[ { "identifier": "Structure", "path": "tme/structure.py", "snippet": "class Structure:\n \"\"\"Represents atomic structures in accordance with the Protein Data Bank (PDB)\n format specification.\n\n Attributes\n ----------\n record_type : NDArray\n Type of the record, e.g., ATOM, HE...
from tempfile import mkstemp from os import remove from tme import Structure from tme.matching_utils import euler_to_rotationmatrix, minimum_enclosing_box import pytest import numpy as np
13,226
def test__getitem__(self): ret_single_index = self.structure[1] ret = self.structure[[1]] self.compare_structures(ret_single_index, ret) ret = self.structure[self.structure.record_type == "ATOM"] assert np.all(ret.record_type == "ATOM") ret = self.structure[self.structure.element_symbol == "C"] assert np.all(ret.element_symbol == "C") def test__repr__(self): unique_chains = "-".join( [ ",".join([str(x) for x in entity]) for entity in self.structure.details["unique_chains"] ] ) min_atom = np.min(self.structure.atom_serial_number) max_atom = np.max(self.structure.atom_serial_number) n_atom = self.structure.atom_serial_number.size min_residue = np.min(self.structure.residue_sequence_number) max_residue = np.max(self.structure.residue_sequence_number) n_residue = self.structure.residue_sequence_number.size repr_str = ( f"Structure object at {id(self.structure)}\n" f"Unique Chains: {unique_chains}, " f"Atom Range: {min_atom}-{max_atom} [N = {n_atom}], " f"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]" ) assert repr_str == self.structure.__repr__() @pytest.mark.parametrize( "path", [ ("./tme/tests/data/Structures/5khe.cif"), ("./tme/tests/data/Structures/5khe.pdb"), ], ) def test_fromfile(self, path): _ = Structure.from_file(path) def test_fromfile_error(self): with pytest.raises(NotImplementedError): _ = Structure.from_file("madeup.extension") @pytest.mark.parametrize("file_format", [("cif"), ("pdb")]) def test_to_file(self, file_format): _, path = mkstemp() path = f"{path}.{file_format}" self.structure.to_file(path) read = self.structure.from_file(path) comparison = self.structure.copy() self.compare_structures(comparison, read, exclude_attributes=["details"]) def test_to_file_error(self): _, path = mkstemp() path = f"{path}.RAISERROR" with pytest.raises(NotImplementedError): self.structure.to_file(path) def test_subset_by_chain(self): chain = "A" ret = self.structure.subset_by_chain(chain=chain) assert np.all(ret.chain_identifier == chain) def test_subset_by_chain_range(self): chain, start, stop = "A", 0, 20 ret = self.structure.subset_by_range(chain=chain, start=start, stop=stop) assert np.all(ret.chain_identifier == chain) assert np.all( np.logical_and( ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop, ) ) def test_center_of_mass(self): center_of_mass = self.structure.center_of_mass() assert center_of_mass.shape[0] == self.structure.atom_coordinate.shape[1] assert np.allclose(center_of_mass, [-0.89391639, 29.94908928, -2.64736741]) def test_centered(self): ret, translation = self.structure.centered() box = minimum_enclosing_box(coordinates=self.structure.atom_coordinate.T) assert np.allclose(ret.center_of_mass(), np.divide(box, 2), atol=1) def test__get_atom_weights_error(self): with pytest.raises(NotImplementedError): self.structure._get_atom_weights( self.structure.atom_name, weight_type="RAISEERROR" ) def test_compare_structures(self): rmsd = Structure.compare_structures(self.structure, self.structure) assert rmsd == 0 rmsd = Structure.compare_structures( self.structure, self.structure, weighted=True ) assert rmsd == 0 translation = (3, 0, 0) structure_transform = self.structure.rigid_transform( translation=translation, rotation_matrix=np.eye(self.structure.atom_coordinate.shape[1]), ) rmsd = Structure.compare_structures(self.structure, structure_transform) assert np.allclose(rmsd, np.linalg.norm(translation)) def test_comopare_structures_error(self): ret = self.structure[[1, 2, 3, 4, 5]] with pytest.raises(ValueError): 
Structure.compare_structures(self.structure, ret) def test_align_structures(self):
STRUCTURE_ATTRIBUTES = [ "record_type", "atom_serial_number", "atom_name", "atom_coordinate", "alternate_location_indicator", "residue_name", "chain_identifier", "residue_sequence_number", "code_for_residue_insertion", "occupancy", "temperature_factor", "segment_identifier", "element_symbol", "charge", "details", ] class TestStructure: def setup_method(self): self.structure = Structure.from_file("./tme/tests/data/Structures/5khe.cif") _, self.path = mkstemp() def teardown_method(self): del self.structure remove(self.path) def compare_structures(self, structure1, structure2, exclude_attributes=[]): for attribute in STRUCTURE_ATTRIBUTES: if attribute in exclude_attributes: continue value = getattr(structure1, attribute) value_comparison = getattr(structure2, attribute) if type(value) == np.ndarray: assert np.all(value_comparison == value) else: assert value == value_comparison def test_initialization(self): structure = Structure( record_type=self.structure.record_type, atom_serial_number=self.structure.atom_serial_number, atom_name=self.structure.atom_name, atom_coordinate=self.structure.atom_coordinate, alternate_location_indicator=self.structure.alternate_location_indicator, residue_name=self.structure.residue_name, chain_identifier=self.structure.chain_identifier, residue_sequence_number=self.structure.residue_sequence_number, code_for_residue_insertion=self.structure.code_for_residue_insertion, occupancy=self.structure.occupancy, temperature_factor=self.structure.temperature_factor, segment_identifier=self.structure.segment_identifier, element_symbol=self.structure.element_symbol, charge=self.structure.charge, details=self.structure.details, ) for attribute in STRUCTURE_ATTRIBUTES: value = getattr(self.structure, attribute) value_comparison = getattr(structure, attribute) if type(value) == np.ndarray: assert np.all(value_comparison == value) else: assert value == value_comparison @pytest.mark.parametrize( "modified_attribute", [ ("record_type"), ("atom_serial_number"), ("atom_name"), ("atom_coordinate"), ("alternate_location_indicator"), ("residue_name"), ("chain_identifier"), ("residue_sequence_number"), ("code_for_residue_insertion"), ("occupancy"), ("temperature_factor"), ("segment_identifier"), ("element_symbol"), ], ) def test_initialization_errors(self, modified_attribute): kwargs = { attribute: getattr(self.structure, attribute) for attribute in STRUCTURE_ATTRIBUTES if attribute != modified_attribute } kwargs[modified_attribute] = getattr(self.structure, modified_attribute)[:1] with pytest.raises(ValueError): Structure(**kwargs) def test__getitem__(self): ret_single_index = self.structure[1] ret = self.structure[[1]] self.compare_structures(ret_single_index, ret) ret = self.structure[self.structure.record_type == "ATOM"] assert np.all(ret.record_type == "ATOM") ret = self.structure[self.structure.element_symbol == "C"] assert np.all(ret.element_symbol == "C") def test__repr__(self): unique_chains = "-".join( [ ",".join([str(x) for x in entity]) for entity in self.structure.details["unique_chains"] ] ) min_atom = np.min(self.structure.atom_serial_number) max_atom = np.max(self.structure.atom_serial_number) n_atom = self.structure.atom_serial_number.size min_residue = np.min(self.structure.residue_sequence_number) max_residue = np.max(self.structure.residue_sequence_number) n_residue = self.structure.residue_sequence_number.size repr_str = ( f"Structure object at {id(self.structure)}\n" f"Unique Chains: {unique_chains}, " f"Atom Range: {min_atom}-{max_atom} [N = {n_atom}], " 
f"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]" ) assert repr_str == self.structure.__repr__() @pytest.mark.parametrize( "path", [ ("./tme/tests/data/Structures/5khe.cif"), ("./tme/tests/data/Structures/5khe.pdb"), ], ) def test_fromfile(self, path): _ = Structure.from_file(path) def test_fromfile_error(self): with pytest.raises(NotImplementedError): _ = Structure.from_file("madeup.extension") @pytest.mark.parametrize("file_format", [("cif"), ("pdb")]) def test_to_file(self, file_format): _, path = mkstemp() path = f"{path}.{file_format}" self.structure.to_file(path) read = self.structure.from_file(path) comparison = self.structure.copy() self.compare_structures(comparison, read, exclude_attributes=["details"]) def test_to_file_error(self): _, path = mkstemp() path = f"{path}.RAISERROR" with pytest.raises(NotImplementedError): self.structure.to_file(path) def test_subset_by_chain(self): chain = "A" ret = self.structure.subset_by_chain(chain=chain) assert np.all(ret.chain_identifier == chain) def test_subset_by_chain_range(self): chain, start, stop = "A", 0, 20 ret = self.structure.subset_by_range(chain=chain, start=start, stop=stop) assert np.all(ret.chain_identifier == chain) assert np.all( np.logical_and( ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop, ) ) def test_center_of_mass(self): center_of_mass = self.structure.center_of_mass() assert center_of_mass.shape[0] == self.structure.atom_coordinate.shape[1] assert np.allclose(center_of_mass, [-0.89391639, 29.94908928, -2.64736741]) def test_centered(self): ret, translation = self.structure.centered() box = minimum_enclosing_box(coordinates=self.structure.atom_coordinate.T) assert np.allclose(ret.center_of_mass(), np.divide(box, 2), atol=1) def test__get_atom_weights_error(self): with pytest.raises(NotImplementedError): self.structure._get_atom_weights( self.structure.atom_name, weight_type="RAISEERROR" ) def test_compare_structures(self): rmsd = Structure.compare_structures(self.structure, self.structure) assert rmsd == 0 rmsd = Structure.compare_structures( self.structure, self.structure, weighted=True ) assert rmsd == 0 translation = (3, 0, 0) structure_transform = self.structure.rigid_transform( translation=translation, rotation_matrix=np.eye(self.structure.atom_coordinate.shape[1]), ) rmsd = Structure.compare_structures(self.structure, structure_transform) assert np.allclose(rmsd, np.linalg.norm(translation)) def test_comopare_structures_error(self): ret = self.structure[[1, 2, 3, 4, 5]] with pytest.raises(ValueError): Structure.compare_structures(self.structure, ret) def test_align_structures(self):
rotation_matrix = euler_to_rotationmatrix((20, -10, 45))
1
2023-10-20 13:46:01+00:00
16k
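The pyTME tests above assert that `compare_structures` returns 0 for identical structures and exactly the translation norm after a pure translation under an identity rotation. The numpy-only sketch below reproduces that property with illustrative `rmsd` and `rigid_transform` helpers; the names and the (d, n) coordinate layout are assumptions, not the library's API.

```python
# Sketch only: why a pure translation yields RMSD == ||translation||.
import numpy as np

def rmsd(coords_a: np.ndarray, coords_b: np.ndarray) -> float:
    # coords are (d, n) arrays of matched atom coordinates.
    if coords_a.shape != coords_b.shape:
        raise ValueError("structures must have the same number of atoms")
    return float(np.sqrt(np.mean(np.sum((coords_a - coords_b) ** 2, axis=0))))

def rigid_transform(coords: np.ndarray, rotation: np.ndarray, translation) -> np.ndarray:
    # coords: (d, n); rotation: (d, d); translation broadcasts over all atoms.
    return rotation @ coords + np.asarray(translation, dtype=float)[:, None]

coords = np.random.rand(3, 100)
moved = rigid_transform(coords, np.eye(3), (3, 0, 0))
# Every atom moves by the same vector, so the RMSD equals its norm.
assert np.isclose(rmsd(coords, moved), np.linalg.norm((3, 0, 0)))
```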
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files...
import os import torch import torch.distributed as dist import logging import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import generator_loss, discriminator_loss, feature_loss, kl_loss from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
11,145
spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): #dist.init_process_group( # backend="gloo", # init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl. #) # Use torchrun instead of mp.spawn #rank = dist.get_rank() #n_gpus = dist.get_world_size() rank = 0 n_gpus = 1 hps = utils.get_hparams() torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, 
betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) # if net_dur_disc is not None: # net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: # if not optim_dur_disc.param_groups[0].get("initial_lr"): # optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, 
non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
loss_gen, losses_gen = generator_loss(y_d_hat_g)
6
2023-10-16 10:04:32+00:00
16k
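The VITS2 training step above alternates discriminator and generator updates through `discriminator_loss` and `generator_loss`. The sketch below shows the least-squares GAN form these losses take in the published VITS implementation (real outputs pushed toward 1 and fake toward 0 for the discriminator; fake pushed toward 1 for the generator); this repo's `losses.py` is assumed to follow the same convention.

```python
# Sketch only: LSGAN-style losses matching how the training loop calls them.
import torch

def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    # Sums the per-discriminator losses over the multi-period discriminator outputs.
    loss = 0.0
    r_losses, g_losses = [], []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        r_loss = torch.mean((1 - dr.float()) ** 2)  # real -> 1
        g_loss = torch.mean(dg.float() ** 2)        # fake -> 0
        loss = loss + r_loss + g_loss
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())
    return loss, r_losses, g_losses

def generator_loss(disc_generated_outputs):
    # The generator tries to make each fake output look real (-> 1).
    loss = 0.0
    gen_losses = []
    for dg in disc_generated_outputs:
        g_loss = torch.mean((1 - dg.float()) ** 2)
        gen_losses.append(g_loss)
        loss = loss + g_loss
    return loss, gen_losses
```

The return shapes match the call sites in the record: `loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(...)` and `loss_gen, losses_gen = generator_loss(y_d_hat_g)`.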
violet-sto/HN-GFN
main.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.t...
from curses import raw from dataset import Dataset from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended from oracle.oracle import Oracle from proxy import get_proxy from generator import TBGFlowNet, FMGFlowNet from utils.metrics import circle_points, compute_success, compute_diversity, compute_novelty, evaluate, compute_correlation from utils.utils import set_random_seed from utils.logging import get_logger from datetime import datetime from botorch.utils.multi_objective.hypervolume import Hypervolume from botorch.utils.sampling import sample_simplex from botorch.utils.transforms import normalize, unnormalize from torch.distributions.dirichlet import Dirichlet from rdkit.Chem import AllChem from rdkit import DataStructs from pymoo.util.ref_dirs import get_reference_directions import os import argparse import json import time import threading import pdb import pickle import gzip import warnings import torch.multiprocessing as mp import torch.nn.functional as F import torch import pandas as pd import numpy as np
12,252
stop_everything() pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(loss), 3) for loss in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/train', train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, diversity = evaluate(args, generator, rollout_worker, 100) corrs = compute_correlation(args, generator, rollout_worker, rollout_worker.test_mols) args.logger.add_scalar( 'Top-100-sampled/volumes', volume, use_context=False) args.logger.add_scalar( 'Top-100-sampled/dists', diversity, use_context=False) args.logger.add_scalar( 'Top-100-sampled/corr', np.mean(corrs), use_context=False) if do_save: save_stuff(i) if volume > best_hv: best_hv = volume if do_save: save_stuff('volume') if np.mean(corrs) > best_corr: best_corr = np.mean(corrs) if do_save: save_stuff('corr') stop_everything() if do_save: save_stuff(i) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def get_test_mols(args, mdp, num): samples = [] fps = [] early_stops = [] while len(samples) < num: if len(samples) % 5000 == 0: print(f'{len(samples)}/{num} mols have been sampled') m = BlockMoleculeDataExtended() min_blocks = args.min_blocks max_blocks = args.max_blocks early_stop_at = np.random.randint(min_blocks, max_blocks + 1) early_stops.append(early_stop_at) for t in range(max_blocks): if t == 0: length = mdp.num_blocks+1 else: length = len(m.stems)*mdp.num_blocks+1 action = np.random.randint(1, length) if t == early_stop_at: action = 0 if t >= min_blocks and action == 0: fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break else: action = max(0, action-1) action = (action % mdp.num_blocks, action // mdp.num_blocks) #print('..', action) m = mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. Note that this node's parent isn't just m, # because this is a sink for all parent transitions fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break return samples def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args):
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug', action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/synthetic') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_samples", default=1000, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3') parser.add_argument("--scalar", default='WeightedSum', type=str) #TODO: other scalars parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1', type=str) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=30000, type=int) # 30k parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=16, type=int) parser.add_argument("--offline_mbsize", default=0, type=int) parser.add_argument("--hindsight_mbsize", default=0, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=0.8, type=float) parser.add_argument("--reward_exp", default=6, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=30, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8) ** 4, type=float) # (0.1/8)**8 parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--condition_type", default='HN', type=str) # 'HN', 'FiLM', 'concat' parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--ray_hidden_dim", default=100, type=int) parser.add_argument("--logit_clipping", default=0., type=float) return parser.parse_args() class RolloutWorker: def __init__(self, args, bpath, proxy, device): self.args = args self.test_split_rng = np.random.RandomState(142857) self.train_rng = np.random.RandomState(int(time.time())) self.mdp = MolMDPExtended(bpath) self.mdp.post_init(device, args.repr_type, include_nblocks=args.include_nblocks) self.mdp.build_translation_table() if args.floatX == 'float64': self.mdp.floatX = self.floatX = torch.double else: 
self.mdp.floatX = self.floatX = torch.float self.proxy = proxy self._device = device self.seen_molecules = set() self.stop_event = threading.Event() ####### # This is the "result", here a list of (reward, BlockMolDataExt, info...) tuples self.sampled_mols = [] self.online_mols = [] self.hindsight_mols = [] self.max_online_mols = 1000 self.max_hindsight_mols = 1000 self.min_blocks = args.min_blocks self.max_blocks = args.max_blocks self.mdp._cue_max_blocks = self.max_blocks self.reward_exp = args.reward_exp self.reward_min = args.reward_min self.reward_norm = args.reward_norm self.reward_exp_ramping = args.reward_exp_ramping self.random_action_prob = args.random_action_prob # If True this basically implements Buesing et al's TreeSample Q, # samples uniformly from it though, no MTCS involved if args.criterion == 'TB' or args.criterion == "Reinforce": self.ignore_parents = True elif args.criterion == 'FM': self.ignore_parents = False def rollout(self, generator, use_rand_policy=True, weights=None, replay=False): weights = Dirichlet(torch.ones(len(self.args.objectives))*self.args.alpha).sample_n(1).to( self.args.device) if weights is None else weights m = BlockMoleculeDataExtended() samples = [] max_blocks = self.max_blocks trajectory_stats = [] for t in range(max_blocks): s = self.mdp.mols2batch([self.mdp.mol2repr(m)]) s_o, m_o = generator(s, vec_data=weights, do_stems=True) # fix from run 330 onwards if t < self.min_blocks: m_o = m_o*0 - 1000 # prevent assigning prob to stop # when we can't stop ## logits = torch.cat([m_o.reshape(-1), s_o.reshape(-1)]) cat = torch.distributions.Categorical( logits=logits) action = cat.sample().item() if use_rand_policy and self.random_action_prob > 0: # just for training if self.train_rng.uniform() < self.random_action_prob: action = self.train_rng.randint( int(t < self.min_blocks), logits.shape[0]) q = torch.cat([m_o.reshape(-1), s_o.reshape(-1)]) trajectory_stats.append( (q[action].item(), action, torch.logsumexp(q, 0).item())) if t >= self.min_blocks and action == 0: r, raw_r = self._get_reward(m, weights) # r: reward, raw_r: scores for the objectives samples.append(((m,), ((-1, 0),), weights, weights, r, m, 1)) break else: action = max(0, action-1) action = (action % self.mdp.num_blocks, action // self.mdp.num_blocks) m_old = m m = self.mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. 
Note that this node's parent isn't just m, # because this is a sink for all parent transitions r, raw_r = self._get_reward(m, weights) if self.ignore_parents: samples.append( ((m_old,), (action,), weights, weights, r, m, 1)) else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat( len(parents), 1), weights, r, m, 1)) break else: if self.ignore_parents: samples.append( ((m_old,), (action,), weights, weights, 0, m, 0)) else: parents, actions = zip(*self.mdp.parents(m)) samples.append( (parents, actions, weights.repeat(len(parents), 1), weights, 0, m, 0)) p = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in samples[-1][0]]) qp = generator(p, weights.repeat(p.num_graphs, 1)) qsa_p = generator.model.index_output_by_action( p, qp[0], qp[1][:, 0], torch.tensor(samples[-1][1], device=self._device).long()) inflow = torch.logsumexp(qsa_p.flatten(), 0).item() self.sampled_mols.append( ([i.cpu().numpy() for i in raw_r], weights.cpu().numpy(), m, trajectory_stats, inflow)) if replay and self.args.hindsight_prob > 0.0: self._add_mol_to_replay(m) return samples def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get scores from oracle score = self.proxy.get_score([m]) score = torch.tensor(list(score.values())).to(self.args.device) if self.args.scalar == 'WeightedSum': raw_reward = (weights*score).sum() elif self.args.scalar == 'Tchebycheff': raw_reward = (weights*score).min() + 0.1 * (weights*score).sum() reward = self.l2r(raw_reward.clip(self.reward_min)) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, use_rand_policy=True): if self.args.condition_type is None: weights = self.test_weights # train specific model else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.trajectories_mbsize)), []) return zip(*samples) def sample2batch(self, mb): p, a, p_weights, weights, r, s, d, *o = mb mols = (p, s) # The batch index of each parent p_batch = torch.tensor(sum([[i]*len(p) for i, p in enumerate(p)], []), device=self._device).long() # Convert all parents and states to repr. 
Note that this # concatenates all the parent lists, which is why we need # p_batch p = self.mdp.mols2batch(list(map(self.mdp.mol2repr, sum(p, ())))) s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s]) # Concatenate all the actions (one per parent per sample) a = torch.tensor(sum(a, ()), device=self._device).long() # rewards and dones r = torch.tensor(r, device=self._device).to(self.floatX) d = torch.tensor(d, device=self._device).to(self.floatX) # weights p_w = torch.cat(p_weights, 0) w = torch.cat(weights, 0) return (p, p_batch, a, p_w, w, r, s, d, mols, *o) def l2r(self, raw_reward, t=0): if self.reward_exp_ramping > 0: reward_exp = 1 + (self.reward_exp - 1) * \ (1 - 1/(1 + t / self.reward_exp_ramping)) # when t=0, exp = 1; t->∞, exp = self.reward_exp else: reward_exp = self.reward_exp reward = (raw_reward/self.reward_norm)**reward_exp return reward def start_samplers(self, generator, n, dataset): self.ready_events = [threading.Event() for i in range(n)] self.resume_events = [threading.Event() for i in range(n)] self.results = [None] * n def f(idx): while not self.stop_event.is_set(): try: self.results[idx] = self.sample2batch( self.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) except Exception as e: print("Exception while sampling:") print(e) self.sampler_threads[idx].failed = True self.sampler_threads[idx].exception = e self.ready_events[idx].set() break self.ready_events[idx].set() self.resume_events[idx].clear() self.resume_events[idx].wait() self.sampler_threads = [threading.Thread( target=f, args=(i,)) for i in range(n)] [setattr(i, 'failed', False) for i in self.sampler_threads] [i.start() for i in self.sampler_threads] round_robin_idx = [0] def get(): while True: idx = round_robin_idx[0] round_robin_idx[0] = (round_robin_idx[0] + 1) % n if self.ready_events[idx].is_set(): r = self.results[idx] self.ready_events[idx].clear() self.resume_events[idx].set() return r elif round_robin_idx[0] == 0: time.sleep(0.001) return get def stop_samplers_and_join(self): self.stop_event.set() if hasattr(self, 'sampler_threads'): while any([i.is_alive() for i in self.sampler_threads]): [i.set() for i in self.resume_events] [i.join(0.05) for i in self.sampler_threads] def train_generative_model_with_oracle(args, generator, bpath, oracle, test_weights, dataset=None, do_save=False): print("Training generator...") device = args.device rollout_worker = RolloutWorker(args, bpath, oracle, device) if args.condition_type is None: rollout_worker.test_weights = torch.tensor(test_weights).to(device)[args.run :args.run+1] else: rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.test_mols = pickle.load(gzip.open('./data/test_mols_6062.pkl.gz', 'rb')) def save_stuff(iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, '{}_generator_checkpoint.pth'.format(iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] best_hv = 0 best_corr = 0 time_last_check = time.time() for i in range(args.num_iterations + 1): rollout_worker.reward_exp = 1 + (args.reward_exp-1) * (1-1/(1+i/20)) if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() 
pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(loss), 3) for loss in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/train', train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, diversity = evaluate(args, generator, rollout_worker, 100) corrs = compute_correlation(args, generator, rollout_worker, rollout_worker.test_mols) args.logger.add_scalar( 'Top-100-sampled/volumes', volume, use_context=False) args.logger.add_scalar( 'Top-100-sampled/dists', diversity, use_context=False) args.logger.add_scalar( 'Top-100-sampled/corr', np.mean(corrs), use_context=False) if do_save: save_stuff(i) if volume > best_hv: best_hv = volume if do_save: save_stuff('volume') if np.mean(corrs) > best_corr: best_corr = np.mean(corrs) if do_save: save_stuff('corr') stop_everything() if do_save: save_stuff(i) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def get_test_mols(args, mdp, num): samples = [] fps = [] early_stops = [] while len(samples) < num: if len(samples) % 5000 == 0: print(f'{len(samples)}/{num} mols have been sampled') m = BlockMoleculeDataExtended() min_blocks = args.min_blocks max_blocks = args.max_blocks early_stop_at = np.random.randint(min_blocks, max_blocks + 1) early_stops.append(early_stop_at) for t in range(max_blocks): if t == 0: length = mdp.num_blocks+1 else: length = len(m.stems)*mdp.num_blocks+1 action = np.random.randint(1, length) if t == early_stop_at: action = 0 if t >= min_blocks and action == 0: fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break else: action = max(0, action-1) action = (action % mdp.num_blocks, action // mdp.num_blocks) #print('..', action) m = mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. Note that this node's parent isn't just m, # because this is a sink for all parent transitions fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break return samples def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args):
set_random_seed(args.seed)
13
2023-10-24 14:10:35+00:00
16k
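The get_test_mols routine in the record above filters randomly sampled molecules for diversity by comparing Morgan fingerprints. A minimal, self-contained sketch of that filter follows, assuming RDKit is installed; the SMILES strings are illustrative, not taken from the dataset:

from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

# Accept a molecule only if its Tanimoto similarity to every previously
# accepted fingerprint stays below 0.7 (the threshold used in the record).
accepted_fps = []
for smiles in ["CCO", "CCN", "CCOCC", "c1ccccc1"]:  # illustrative inputs
    mol = Chem.MolFromSmiles(smiles)
    fp = AllChem.GetMorganFingerprintAsBitVect(mol, 3, 2048)
    if not accepted_fps or max(DataStructs.BulkTanimotoSimilarity(fp, accepted_fps)) < 0.7:
        accepted_fps.append(fp)
print(f"kept {len(accepted_fps)} diverse molecules")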
SALT-NLP/Efficient_Unlearning
src/models/transformers/parameter-efficient-finetuning/wrappers/configuration.py
[ { "identifier": "PretrainedConfig", "path": "src/models/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for loading/...
from ...configuration_utils import PretrainedConfig from ...models.encoder_decoder.configuration_encoder_decoder import EncoderDecoderConfig from ..configuration import ModelAdaptersConfig
14,385
CONFIG_CLASS_KEYS_MAPPING = { "bart": { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", "hidden_dropout_prob": "dropout", "attention_probs_dropout_prob": "attention_dropout", }, "beit": {}, "bert": {}, "distilbert": { "hidden_dropout_prob": "dropout", "attention_probs_dropout_prob": "attention_dropout", }, "gpt2": { "hidden_dropout_prob": "resid_pdrop", "attention_probs_dropout_prob": "attn_pdrop", }, "gptj": { "hidden_dropout_prob": "resid_pdrop", "attention_probs_dropout_prob": "attn_pdrop", }, "mbart": { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", "hidden_dropout_prob": "dropout", "attention_probs_dropout_prob": "attention_dropout", }, "roberta": {}, "t5": { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", "hidden_dropout_prob": "dropout_rate", "attention_probs_dropout_prob": "dropout_rate", }, "vit": {}, "xlm_roberta": {}, } def wrap_config(config: PretrainedConfig) -> PretrainedConfig: """ Makes required changes to a model config class to allow usage with adapters. Args: config (PretrainedConfig): The config to be wrapped. Returns: PretrainedConfig: The same config object, with modifications applied. """ if getattr(config, "is_adaptable", False): return config # Init ModelAdaptersConfig if not hasattr(config, "adapters"):
CONFIG_CLASS_KEYS_MAPPING = { "bart": { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", "hidden_dropout_prob": "dropout", "attention_probs_dropout_prob": "attention_dropout", }, "beit": {}, "bert": {}, "distilbert": { "hidden_dropout_prob": "dropout", "attention_probs_dropout_prob": "attention_dropout", }, "gpt2": { "hidden_dropout_prob": "resid_pdrop", "attention_probs_dropout_prob": "attn_pdrop", }, "gptj": { "hidden_dropout_prob": "resid_pdrop", "attention_probs_dropout_prob": "attn_pdrop", }, "mbart": { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", "hidden_dropout_prob": "dropout", "attention_probs_dropout_prob": "attention_dropout", }, "roberta": {}, "t5": { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", "hidden_dropout_prob": "dropout_rate", "attention_probs_dropout_prob": "dropout_rate", }, "vit": {}, "xlm_roberta": {}, } def wrap_config(config: PretrainedConfig) -> PretrainedConfig: """ Makes required changes to a model config class to allow usage with adapters. Args: config (PretrainedConfig): The config to be wrapped. Returns: PretrainedConfig: The same config object, with modifications applied. """ if getattr(config, "is_adaptable", False): return config # Init ModelAdaptersConfig if not hasattr(config, "adapters"):
config.adapters = ModelAdaptersConfig()
2
2023-10-18 18:05:54+00:00
16k
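The CONFIG_CLASS_KEYS_MAPPING in the record above lets adapter code read canonical attribute names off heterogeneous model configs by mapping them to model-specific aliases. A framework-free sketch of that lookup, where DummyT5Config and get_attr are hypothetical stand-ins rather than transformers API:

KEY_MAP = {"t5": {"hidden_size": "d_model", "num_attention_heads": "num_heads"}}

class DummyT5Config:
    model_type = "t5"
    d_model = 512
    num_heads = 8

def get_attr(config, name):
    # Try the canonical name first, then fall back to the per-model alias.
    if hasattr(config, name):
        return getattr(config, name)
    alias = KEY_MAP.get(config.model_type, {}).get(name)
    if alias is not None and hasattr(config, alias):
        return getattr(config, alias)
    raise AttributeError(name)

print(get_attr(DummyT5Config(), "hidden_size"))  # -> 512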
nchen909/Pass-Tuning
models_list/adapter/modeling_auto.py
[ { "identifier": "PLBartForConditionalGeneration", "path": "models_list/adapter/modeling_plbart.py", "snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.ve...
import warnings from collections import OrderedDict from transformers.utils import logging from transformers.models.albert.modeling_albert import ( AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from .modeling_plbart import ( PLBartForConditionalGeneration, PLBartModel, ) from transformers.models.bart.modeling_bart import ( BartForCausalLM, BartForQuestionAnswering, BartForSequenceClassification, ) from transformers.models.bert.modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder from transformers.models.big_bird.modeling_big_bird import ( BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, ) from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, ) from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, ) from transformers.models.camembert.modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from transformers.models.canine.modeling_canine import ( CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, ) from transformers.models.clip.modeling_clip import CLIPModel from transformers.models.convbert.modeling_convbert import ( ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel from transformers.models.deberta.modeling_deberta import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2ForMaskedLM, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, ) from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel from transformers.models.distilbert.modeling_distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder from transformers.models.electra.modeling_electra import ( ElectraForMaskedLM, 
ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel from transformers.models.flaubert.modeling_flaubert import ( FlaubertForMultipleChoice, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel from transformers.models.funnel.modeling_funnel import ( FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel from transformers.models.hubert.modeling_hubert import HubertModel from transformers.models.ibert.modeling_ibert import ( IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) from transformers.models.led.modeling_led import ( LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, ) from transformers.models.longformer.modeling_longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.luke.modeling_luke import LukeModel from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel from transformers.models.mbart.modeling_mbart import ( MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, ) from transformers.models.megatron_bert.modeling_megatron_bert import ( MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) from transformers.models.mpnet.modeling_mpnet import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel from transformers.models.pegasus.modeling_pegasus import 
PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, RagSequenceForGeneration, RagTokenForGeneration, ) from transformers.models.reformer.modeling_reformer import ( ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerModel, ReformerModelWithLMHead, ) from transformers.models.retribert.modeling_retribert import RetriBertModel from transformers.models.roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel from transformers.models.squeezebert.modeling_squeezebert import ( SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) from .modeling_t5 import T5ForConditionalGeneration, T5Model from transformers.models.tapas.modeling_tapas import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, ) from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model from transformers.models.xlm.modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import ( XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from transformers.models.xlm_roberta.modeling_xlm_roberta import ( XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from transformers.models.xlnet.modeling_xlnet import ( XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update from transformers.models.auto.configuration_auto import ( AlbertConfig, PLBartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, CanineConfig, CLIPConfig, ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DeiTConfig, DetrConfig, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, GPTNeoConfig, HubertConfig, IBertConfig, 
LayoutLMConfig, LEDConfig, LongformerConfig, LukeConfig, LxmertConfig, M2M100Config, MarianConfig, MBartConfig, MegatronBertConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, ReformerConfig, RetriBertConfig, RobertaConfig, RoFormerConfig, Speech2TextConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, VisualBertConfig, ViTConfig, Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, )
11,355
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
(PLBartConfig, PLBartForConditionalGeneration),
0
2023-10-20 09:24:44+00:00
16k
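The MODEL_MAPPING OrderedDict in the record above is what drives transformers-style auto classes: the entry whose config class matches the given config selects the model class to instantiate. A toy sketch of that dispatch, using stand-in classes rather than the real transformers ones:

from collections import OrderedDict

class BertConfig: ...
class BertModel:
    def __init__(self, config): self.config = config
class T5Config: ...
class T5Model:
    def __init__(self, config): self.config = config

MODEL_MAPPING = OrderedDict([(BertConfig, BertModel), (T5Config, T5Model)])

def auto_model_from_config(config):
    # Walk the mapping and build the model class registered for this config type.
    for config_cls, model_cls in MODEL_MAPPING.items():
        if isinstance(config, config_cls):
            return model_cls(config)
    raise ValueError(f"unsupported config: {type(config).__name__}")

print(type(auto_model_from_config(T5Config())).__name__)  # -> T5Model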
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES...
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
14,209
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt:
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt:
raise InvalidDateInputError(
1
2023-10-20 01:07:20+00:00
16k
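The diff_tx_data function in the record above enforces the double-entry invariant: total credits must equal total debits within a configured tolerance. A framework-free sketch of that balance check, with illustrative transactions and tolerance value:

# Tolerance is illustrative; the record reads it from Django Ledger settings.
TOLERANCE = 0.02

def is_balanced(tx_data):
    # Sum each side of the ledger and compare within the tolerance.
    credits = sum(tx["amount"] for tx in tx_data if tx["tx_type"] == "credit")
    debits = sum(tx["amount"] for tx in tx_data if tx["tx_type"] == "debit")
    return abs(credits - debits) <= TOLERANCE

txs = [
    {"tx_type": "debit", "amount": 100.0},
    {"tx_type": "credit", "amount": 100.0},
]
print(is_balanced(txs))  # -> True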
Glasgow-AI4BioMed/GenKIE
tasks/pretrain_tasks/unify_task.py
[ { "identifier": "OFATask", "path": "tasks/ofa_task.py", "snippet": "class OFATask(FairseqTask):\n def __init__(self, cfg: OFAConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg...
from dataclasses import dataclass, field from typing import Optional from fairseq.tasks import register_task from fairseq.data import FairseqDataset, iterators from tasks.ofa_task import OFATask, OFAConfig from data.pretrain_data.unify_dataset import UnifyDataset from data.file_dataset import FileDataset import json import logging import os import math
11,373
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken from OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "lambda of the Poisson distribution used to sample span lengths when mask_length is span-poisson"}, ) replace_length: int = field( default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig) class UnifyTask(OFATask): def __init__(self, cfg: UnifyConfig, src_dict, tgt_dict): super().__init__(cfg, src_dict, tgt_dict) self.type2ans_dict = json.load(open(os.path.join(self.cfg.neg_sample_dir, 'type2ans.json'))) self.ans2type_dict = {} for type, answer_list in self.type2ans_dict.items(): if type == 'other': continue for answer in answer_list: self.ans2type_dict[answer] = type self.all_object_list = [ row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'object.txt')) if row.strip() != '' ] self.all_caption_list = [ row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'all_captions.txt')) if row.strip() != '' ] self.pure_text_dataset = None self.pure_image_dataset = None self.detection_dataset = None if self.cfg.text_data is not None:
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken from OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "lambda of the Poisson distribution used to sample span lengths when mask_length is span-poisson"}, ) replace_length: int = field( default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig) class UnifyTask(OFATask): def __init__(self, cfg: UnifyConfig, src_dict, tgt_dict): super().__init__(cfg, src_dict, tgt_dict) self.type2ans_dict = json.load(open(os.path.join(self.cfg.neg_sample_dir, 'type2ans.json'))) self.ans2type_dict = {} for type, answer_list in self.type2ans_dict.items(): if type == 'other': continue for answer in answer_list: self.ans2type_dict[answer] = type self.all_object_list = [ row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'object.txt')) if row.strip() != '' ] self.all_caption_list = [ row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'all_captions.txt')) if row.strip() != '' ] self.pure_text_dataset = None self.pure_image_dataset = None self.detection_dataset = None if self.cfg.text_data is not None:
self.pure_text_dataset = FileDataset(self.cfg.text_data, self.cfg.text_selected_cols)
3
2023-10-20 20:01:42+00:00
16k
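The UnifyConfig in the record above follows fairseq's dataclass-config pattern: each field pairs a default value with help metadata that the framework turns into CLI documentation. A minimal sketch that just reads the metadata back; ToyConfig is a hypothetical stand-in, not part of fairseq:

from dataclasses import dataclass, field, fields
from typing import Optional

@dataclass
class ToyConfig:
    mask_ratio: float = field(default=0.3, metadata={"help": "fraction of tokens to mask"})
    text_data: Optional[str] = field(default=None, metadata={"help": "pure text data"})

# Iterate over the declared fields and surface their defaults and help text,
# much like fairseq does when it builds argparse options from a dataclass.
for f in fields(ToyConfig):
    print(f"--{f.name} (default={f.default}): {f.metadata['help']}")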
timapage/pyqt6-yolov8
main.py
[ { "identifier": "CameraCaptureThread", "path": "src/qt/stream/video_capture.py", "snippet": "class CameraCaptureThread(QThread):\n send_video_info = pyqtSignal(dict)\n send_frame = pyqtSignal(list)\n def __init__(self):\n super(CameraCaptureThread, self).__init__()\n self.thread_n...
from src.qt.stream.video_capture import CameraCaptureThread from src.qt.stream.visualize import VideoVisualizationThread from src.qt.stream.ai_worker import AiWorkerThread from src.ui.main_window import Ui_MainWindow from src.qt.video.video_worker import FileProcessThread from PyQt6 import QtGui, QtWidgets from PyQt6.QtCore import Qt import sys import numpy as np
12,238
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self) self.ai_thread = AiWorkerThread() self.camera_thread = CameraCaptureThread()
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self) self.ai_thread = AiWorkerThread() self.camera_thread = CameraCaptureThread()
self.display_thread = VideoVisualizationThread()
1
2023-10-18 09:21:01+00:00
16k
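The MainWindow in the record above wires worker QThreads (camera capture, AI inference, visualization) to the GUI through signals. A headless sketch of that pattern, assuming PyQt6 is installed; the WorkerThread class and its send_frame signal are illustrative:

import sys
from PyQt6.QtCore import QCoreApplication, QThread, pyqtSignal

class WorkerThread(QThread):
    send_frame = pyqtSignal(int)

    def run(self):
        # Runs in the worker thread; emitted signals are queued back to the
        # main thread and delivered by the event loop.
        for i in range(3):
            self.send_frame.emit(i)

app = QCoreApplication(sys.argv)
worker = WorkerThread()
worker.send_frame.connect(lambda i: print(f"got frame {i}"))
worker.finished.connect(app.quit)  # built-in QThread signal
worker.start()
app.exec()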
S-LoRA/S-LoRA
slora/server/router/manager.py
[ { "identifier": "SamplingParams", "path": "slora/server/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n ...
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..io_struct import BatchTokenIdOut, AbortReq from .stats import Stats from slora.server.input_params import InputParams from slora.models.peft.lora_adapter import get_lora_config from slora.server.router.profiler import AlphaModel, BetaModel from slora.server.router.abort_req_queue import AbortReqQueue from slora.server.router.cluster_req_queue import ClusterReqQueue from slora.server.router.vtc_req_queue import VTCReqQueue from slora.server.router.pets_req_queue import PETSReqQueue from slora.server.router.peft_req_queue import PEFTReqQueue
12,899
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft":
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft":
return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
19
2023-11-05 04:08:36+00:00
16k
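The get_scheduler function in the record above dispatches on a scheduler-name string to pick a request-queue class, constructing each with shared capacity arguments. A stand-alone sketch of that factory shape; the queue classes and names here are stand-ins for S-LoRA's real ones:

class FCFSQueue:
    def __init__(self, max_tokens, batch_max, max_reqs):
        self.caps = (max_tokens, batch_max, max_reqs)

class VTCQueue(FCFSQueue): ...

# Registry mapping scheduler names to queue classes.
SCHEDULERS = {"fcfs": FCFSQueue, "vtc_fair": VTCQueue}

def get_scheduler(name, max_tokens, batch_max, max_reqs):
    try:
        return SCHEDULERS[name](max_tokens, batch_max, max_reqs)
    except KeyError:
        raise ValueError(f"unknown scheduler: {name}") from None

print(type(get_scheduler("vtc_fair", 4096, 512, 32)).__name__)  # -> VTCQueue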
fleet-ai/context
cli.py
[ { "identifier": "print_markdown", "path": "utils/utils.py", "snippet": "def print_markdown(message):\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rprint(Rule(style=\"white\"))\n ...
import os import openai import sys import argparse import traceback from getpass import getpass from rich import print as rprint from utils.utils import print_markdown, print_exception, extract_code_blocks, print_help from utils.stream import TextStream from utils.ai import ( retrieve_context, construct_prompt, get_remote_chat_response, get_other_chat_response, ) from constants.cli import ARGUMENTS, LIBRARIES, OPENAI_MODELS from constants.ai import MODELS_TO_TOKENS
13,164
model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters) prompts = construct_prompt( messages, rag_context, model=model, cite_sources=cite_sources, context_window=context_window, ) full_response = "" try: streamer = TextStream() if model in OPENAI_MODELS: for response in get_remote_chat_response(prompts, model=model): if response: full_response += response streamer.print_stream(full_response) else:
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window if model in OPENAI_MODELS: context_window = MODELS_TO_TOKENS[model] else: context_window = args.context_window # If local model requested, use LMStudio api_key = "" if args.local: model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. 
Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters) prompts = construct_prompt( messages, rag_context, model=model, cite_sources=cite_sources, context_window=context_window, ) full_response = "" try: streamer = TextStream() if model in OPENAI_MODELS: for response in get_remote_chat_response(prompts, model=model): if response: full_response += response streamer.print_stream(full_response) else:
for response in get_other_chat_response(prompts, model=model):
8
2023-11-02 07:07:13+00:00
16k
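The record above ends at the branch that streams completions from non-OpenAI models; its `next_line` field is the loop header `for response in get_other_chat_response(prompts, model=model):`. A minimal, self-contained sketch of that consumption pattern follows. `fake_chat_response` is a hypothetical stand-in for the repo's `get_other_chat_response` generator, and `print(..., end="\r")` stands in for `TextStream.print_stream`; neither is the project's real API.

```python
# Sketch of the streaming loop this record's `next_line` completes.
# `fake_chat_response` is a hypothetical stub, not the project's real API.
def fake_chat_response(prompts, model="local-model"):
    for chunk in ["Hello", ", ", "world", "!"]:
        yield chunk  # each chunk is a partial text fragment, like the real stream

full_response = ""
for response in fake_chat_response([{"role": "user", "content": "hi"}]):
    if response:                        # skip empty keep-alive chunks
        full_response += response
        print(full_response, end="\r")  # re-render the accumulated text in place
print()                                 # final newline once the stream ends
```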
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=...
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
13,671
verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required')
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required')
self._checker = Checker(
0
2023-11-05 13:28:57+00:00
16k
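The Broker docstrings quoted in this record document `grab()` and `find()` directly, so a usage sketch is straightforward. Two assumptions are made here: the import path is inferred from the record's `file_path` (`proxyhub/api.py`), and the `None` end-of-stream sentinel follows the upstream proxybroker examples this fork appears to derive from; neither is verified against the repo.

```python
import asyncio

from proxyhub.api import Broker  # path assumed from this record's file_path

async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:  # assumed end-of-stream sentinel (proxybroker convention)
            break
        print(proxy)

async def main(broker, proxies):
    await asyncio.gather(
        broker.find(types=["HTTP", "HTTPS"], limit=5),  # `types` is required, per find()'s docstring
        show(proxies),
    )

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
proxies = asyncio.Queue()
broker = Broker(proxies, timeout=8, max_tries=3, loop=loop)  # kwargs match __init__ above
loop.run_until_complete(main(broker, proxies))
```

Passing the loop explicitly mirrors `Broker.__init__`, which installs a SIGINT handler on whatever loop it is given.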
TheFunny/ArisuAutoSweeper
module/device/method/minitouch.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n ...
import asyncio import json import re import socket import time import websockets from functools import wraps from typing import List from adbutils.errors import AdbError from uiautomator2 import _Service from module.base.decorator import Config, cached_property, del_cached_property from module.base.timer import Timer from module.base.utils import * from module.device.connection import Connection from module.device.method.utils import RETRY_TRIES, retry_sleep, handle_adb_error from module.exception import RequestHumanTakeover, ScriptError from module.logger import logger
12,465
""" DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None
def random_normal_distribution(a, b, n=5): output = np.mean(np.random.uniform(a, b, size=n)) return output def random_theta(): theta = np.random.uniform(0, 2 * np.pi) return np.array([np.sin(theta), np.cos(theta)]) def random_rho(dis): return random_normal_distribution(-dis, dis) def insert_swipe(p0, p3, speed=15, min_distance=10): """ Insert way point from start to end. First generate a cubic bézier curve Args: p0: Start point. p3: End point. speed: Average move speed, pixels per 10ms. min_distance: Returns: list[list[int]]: List of points. Examples: > insert_swipe((400, 400), (600, 600), speed=20) [[400, 400], [406, 406], [416, 415], [429, 428], [444, 442], [462, 459], [481, 478], [504, 500], [527, 522], [545, 540], [560, 557], [573, 570], [584, 582], [592, 590], [597, 596], [600, 600]] """ p0 = np.array(p0) p3 = np.array(p3) # Random control points in Bézier curve distance = np.linalg.norm(p3 - p0) p1 = 2 / 3 * p0 + 1 / 3 * p3 + random_theta() * random_rho(distance * 0.1) p2 = 1 / 3 * p0 + 2 / 3 * p3 + random_theta() * random_rho(distance * 0.1) # Random `t` on Bézier curve, sparse in the middle, dense at start and end segments = max(int(distance / speed) + 1, 5) lower = random_normal_distribution(-85, -60) upper = random_normal_distribution(80, 90) theta = np.arange(lower + 0., upper + 0.0001, (upper - lower) / segments) ts = np.sin(theta / 180 * np.pi) ts = np.sign(ts) * abs(ts) ** 0.9 ts = (ts - min(ts)) / (max(ts) - min(ts)) # Generate cubic Bézier curve points = [] prev = (-100, -100) for t in ts: point = p0 * (1 - t) ** 3 + 3 * p1 * t * (1 - t) ** 2 + 3 * p2 * t ** 2 * (1 - t) + p3 * t ** 3 point = point.astype(int).tolist() if np.linalg.norm(np.subtract(point, prev)) < min_distance: continue points.append(point) prev = point # Delete nearing points if len(points[1:]): distance = np.linalg.norm(np.subtract(points[1:], points[0]), axis=1) mask = np.append(True, distance > min_distance) points = np.array(points)[mask].tolist() else: points = [p0, p3] return points class Command: def __init__( self, operation: str, contact: int = 0, x: int = 0, y: int = 0, ms: int = 10, pressure: int = 100 ): """ See https://github.com/openstf/minitouch#writable-to-the-socket Args: operation: c, r, d, m, u, w contact: x: y: ms: pressure: """ self.operation = operation self.contact = contact self.x = x self.y = y self.ms = ms self.pressure = pressure def to_minitouch(self) -> str: """ String that write into minitouch socket """ if self.operation == 'c': return f'{self.operation}\n' elif self.operation == 'r': return f'{self.operation}\n' elif self.operation == 'd': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'm': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'u': return f'{self.operation} {self.contact}\n' elif self.operation == 'w': return f'{self.operation} {self.ms}\n' else: return '' def to_atx_agent(self, max_x=1280, max_y=720) -> str: """ Dict that send to atx-agent, $DEVICE_URL/minitouch See https://github.com/openatx/atx-agent#minitouch%E6%93%8D%E4%BD%9C%E6%96%B9%E6%B3%95 """ x, y = self.x / max_x, self.y / max_y if self.operation == 'c': out = dict(operation=self.operation) elif self.operation == 'r': out = dict(operation=self.operation) elif self.operation == 'd': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'm': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif 
self.operation == 'u': out = dict(operation=self.operation, index=self.contact) elif self.operation == 'w': out = dict(operation=self.operation, milliseconds=self.ms) else: out = dict() return json.dumps(out) class CommandBuilder: """Build command str for minitouch. You can use this, to custom actions as you wish:: with safe_connection(_DEVICE_ID) as connection: builder = CommandBuilder() builder.down(0, 400, 400, 50) builder.commit() builder.move(0, 500, 500, 50) builder.commit() builder.move(0, 800, 400, 50) builder.commit() builder.up(0) builder.commit() builder.publish(connection) """ DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None
for _ in range(RETRY_TRIES):
5
2023-11-01 07:09:45+00:00
16k
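The `Command.to_minitouch` format strings appear verbatim in this record, which makes the minitouch wire protocol easy to demonstrate. The class below is a standalone mirror of those format strings for illustration only; it is not the project's importable `Command`.

```python
# Standalone mirror of Command.to_minitouch from the record above,
# kept only to show what a tap serializes to on the minitouch socket.
class Command:
    def __init__(self, operation, contact=0, x=0, y=0, ms=10, pressure=100):
        self.operation, self.contact = operation, contact
        self.x, self.y, self.ms, self.pressure = x, y, ms, pressure

    def to_minitouch(self) -> str:
        if self.operation in ("c", "r"):   # commit / reset
            return f"{self.operation}\n"
        if self.operation in ("d", "m"):   # down / move
            return f"{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n"
        if self.operation == "u":          # up
            return f"{self.operation} {self.contact}\n"
        if self.operation == "w":          # wait
            return f"{self.operation} {self.ms}\n"
        return ""

# A click at (400, 400): down, commit, wait 10 ms, up, commit.
tap = [Command("d", x=400, y=400), Command("c"),
       Command("w", ms=10), Command("u"), Command("c")]
print("".join(c.to_minitouch() for c in tap), end="")
# -> d 0 400 400 100 / c / w 10 / u 0 / c, one command per socket line
```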
BrianPugh/cyclopts
tests/test_help.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)...
import inspect import sys import attrs import pytest from enum import Enum from textwrap import dedent from typing import List, Literal, Optional, Union from typing_extensions import Annotated from typing import Annotated from cyclopts import App, Group, Parameter from cyclopts.help import ( HelpEntry, HelpPanel, create_parameter_help_panel, format_command_entries, format_doc, format_usage, ) from cyclopts.resolve import ResolvedCommand
12,182
def cmd( foo: Annotated[CompSciProblem, Parameter(help="Docstring for foo.")] = CompSciProblem.fizz, bar: Annotated[CompSciProblem, Parameter(help="Docstring for bar.")] = CompSciProblem.buzz, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [choices: fizz,buzz] [default: fizz] │ │ BAR,--bar Docstring for bar. [choices: fizz,buzz] [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_env_var(capture_format_group_parameters): def cmd( foo: Annotated[int, Parameter(env_var=["FOO", "BAR"], help="Docstring for foo.")] = 123, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [env var: FOO BAR] [default: 123] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(help="Docstring for bar.")], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * --bar Docstring for bar. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_defaults(app, console): @app.command(help="Cmd help string.") def cmd( *tokens: Annotated[str, Parameter(show=False, allow_leading_hyphen=True)], bar: Annotated[str, Parameter(help="Docstring for bar.")] = "bar-value", baz: Annotated[str, Parameter(help="Docstring for bar.", env_var="BAZ")] = "baz-value", ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ --bar Docstring for bar. [default: bar-value] │ │ --baz Docstring for bar. [env var: BAZ] [default: baz-value] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_no_parse(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(parse=False)], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_parameter_group_description(app, console):
if sys.version_info < (3, 9): else: @pytest.fixture def app(): return App( name="app", help="App Help String Line 1.", ) def test_empty_help_panel_rich_silent(console): help_panel = HelpPanel(format="command", title="test") with console.capture() as capture: console.print(help_panel) actual = capture.get() assert actual == "" def test_help_default_action(app, console): """No command should default to help.""" with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage(app, console): app.usage = "My custom usage." with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ My custom usage. App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage_subapp(app, console): app.command(App(name="foo", usage="My custom usage.")) with console.capture() as capture: app(["foo", "--help"], console=console) actual = capture.get() expected = dedent( """\ My custom usage. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_default_help_flags(console): """Standard help flags.""" app = App(name="app", help="App Help String Line 1.") with console.capture() as capture: app(["--help"], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_usage_empty(console): app = App( name="app", help="App Help String Line 1.", help_flags=[], version_flags=[], ) with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app\n\n" def test_help_format_usage_command(app, console): @app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd): command = ResolvedCommand(cmd, *default_function_groups) with console.capture() as capture: group, iparams = command.groups_iparams[0] cparams = [command.iparam_to_cparam[x] for x in iparams] console.print(create_parameter_help_panel(group, iparams, cparams)) return capture.get() return inner def test_help_format_group_parameters(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], bar: Annotated[str, Parameter(help="Docstring for bar.")], ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * BAR,--bar Docstring for bar. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_short_name(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(name=["--foo", "-f"], help="Docstring for foo.")], ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo -f Docstring for foo. 
[required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_from_docstring(capture_format_group_parameters): def cmd(foo: str, bar: str): """ Parameters ---------- foo: str Docstring for foo. bar: str Docstring for bar. """ pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * BAR,--bar Docstring for bar. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_bool_flag(capture_format_group_parameters): def cmd( foo: Annotated[bool, Parameter(help="Docstring for foo.")] = True, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo,--no-foo Docstring for foo. [default: True] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_bool_flag_custom_negative(capture_format_group_parameters): def cmd( foo: Annotated[bool, Parameter(negative="--yesnt-foo", help="Docstring for foo.")] = True, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo,--yesnt-foo Docstring for foo. [default: True] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_list_flag(capture_format_group_parameters): def cmd( foo: Annotated[Optional[List[int]], Parameter(help="Docstring for foo.")] = None, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo,--empty-foo Docstring for foo. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_defaults(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")] = "fizz", bar: Annotated[str, Parameter(help="Docstring for bar.")] = "buzz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [default: fizz] │ │ BAR,--bar Docstring for bar. [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_defaults_no_show(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(show_default=False, help="Docstring for foo.")] = "fizz", bar: Annotated[str, Parameter(help="Docstring for bar.")] = "buzz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. │ │ BAR,--bar Docstring for bar. 
[default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_choices_literal_no_show(capture_format_group_parameters): def cmd( foo: Annotated[Literal["fizz", "buzz"], Parameter(show_choices=False, help="Docstring for foo.")] = "fizz", bar: Annotated[Literal["fizz", "buzz"], Parameter(help="Docstring for bar.")] = "buzz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [default: fizz] │ │ BAR,--bar Docstring for bar. [choices: fizz,buzz] [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_choices_literal_union(capture_format_group_parameters): def cmd( foo: Annotated[ Union[int, Literal["fizz", "buzz"], Literal["bar"]], Parameter(help="Docstring for foo.") ] = "fizz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [choices: fizz,buzz,bar] [default: │ │ fizz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_choices_enum(capture_format_group_parameters): class CompSciProblem(Enum): fizz = "bleep bloop blop" buzz = "blop bleep bloop" def cmd( foo: Annotated[CompSciProblem, Parameter(help="Docstring for foo.")] = CompSciProblem.fizz, bar: Annotated[CompSciProblem, Parameter(help="Docstring for bar.")] = CompSciProblem.buzz, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [choices: fizz,buzz] [default: fizz] │ │ BAR,--bar Docstring for bar. [choices: fizz,buzz] [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_env_var(capture_format_group_parameters): def cmd( foo: Annotated[int, Parameter(env_var=["FOO", "BAR"], help="Docstring for foo.")] = 123, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [env var: FOO BAR] [default: 123] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(help="Docstring for bar.")], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * --bar Docstring for bar. 
[required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_defaults(app, console): @app.command(help="Cmd help string.") def cmd( *tokens: Annotated[str, Parameter(show=False, allow_leading_hyphen=True)], bar: Annotated[str, Parameter(help="Docstring for bar.")] = "bar-value", baz: Annotated[str, Parameter(help="Docstring for bar.", env_var="BAZ")] = "baz-value", ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ --bar Docstring for bar. [default: bar-value] │ │ --baz Docstring for bar. [env var: BAZ] [default: baz-value] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_no_parse(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(parse=False)], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_parameter_group_description(app, console):
@app.command(group_parameters=Group("Custom Title", help="Parameter description."))
1
2023-11-03 02:24:25+00:00
16k
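The cyclopts tests in this record all follow one pattern: build an `App`, register a command whose parameters carry `Annotated`/`Parameter` metadata, and assert on the rendered help panels. A minimal runnable version of that pattern, using only names that appear in the record:

```python
# Minimal sketch of the pattern the tests above exercise; running it
# prints the Usage line plus the boxed Parameters panel they assert on.
from typing_extensions import Annotated

from cyclopts import App, Parameter

app = App(name="app", help="App Help String Line 1.")

@app.command(help="Cmd help string.")
def cmd(
    foo: Annotated[str, Parameter(help="Docstring for foo.")],
    *,
    bar: Annotated[str, Parameter(help="Docstring for bar.")],
):
    pass

if __name__ == "__main__":
    app(["cmd", "--help"])  # same invocation style the tests use, minus the captured console
```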
radekd91/inferno
inferno/datasets/AfewVaDataModule.py
[ { "identifier": "load_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation(filename):\n with open(filename, \"rb\") as f:\n seg = cpkl.load(f, compression='gzip')\n seg_type = seg[0]\n seg_image = seg[1]\n # seg_type = pkl.load(f)\n # s...
import json import os, sys import numpy as np import scipy as sp import torch import pytorch_lightning as pl import pandas as pd import pickle as pkl import imgaug import traceback import json import bisect import warnings import yaml from enum import Enum from pathlib import Path from skimage.io import imread, imsave from skimage.transform import resize, rescale from inferno.datasets.IO import load_segmentation, process_segmentation, load_emotion, save_emotion from inferno.utils.image import numpy_image_to_torch from inferno.transforms.keypoints import KeypointNormalization from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp from inferno.datasets.EmotionalImageDataset import EmotionalImageDatasetBase from inferno.datasets.UnsupervisedImageDataset import UnsupervisedImageDataset from inferno.utils.FaceDetector import save_landmark, load_landmark from tqdm import auto from torch.utils.data.dataloader import DataLoader from inferno.transforms.imgaug import create_image_augmenter from torchvision.transforms import Resize, Compose from sklearn.neighbors import NearestNeighbors from torch.utils.data._utils.collate import default_collate from torch.utils.data.sampler import WeightedRandomSampler from collections import OrderedDict from munch import Munch from inferno.utils.other import class_from_str from omegaconf import OmegaConf, DictConfig from inferno.layers.losses.EmonetLoader import get_emonet
12,597
# for i in range(feat.shape[0]): # idx = bi*self.train_batch_size + i array[bi*self.train_batch_size:bi*self.train_batch_size + feat.shape[0], ...] = feat del array else: print(f"Feature array found in '{outfile_name}'") for bi, batch in enumerate(dl): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] break array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype, modifier='r') nbrs = NearestNeighbors(n_neighbors=30, algorithm='auto', n_jobs=-1).fit(array) distances, indices = nbrs.kneighbors(array, NUM_NEIGHBORS) indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="w+", shape=indices.shape ) indices_array[...] = indices del indices_array distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="w+", shape=distances.shape ) distances_array[...] = distances del distances_array # save sizes a dtypes with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(indices.dtype, f) pkl.dump(indices.shape, f) with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(distances.dtype, f) pkl.dump(distances.shape, f) self.nn_indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="r", shape=indices.shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="r", shape=distances.shape ) class AfewVaDataVisTestModule(AfewVaDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setup(self, stage=None): self.training_set = None self.validation_set = TestSubsetAfewVa(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = TestSubsetAfewVa(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def val_dataloader(self): return DataLoader(self.validation_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] class AfewVa(EmotionalImageDatasetBase): def __init__(self, image_path, sample_list, image_size, scale = 1.4, transforms : imgaug.augmenters.Augmenter = None, use_gt_bb=True, bb_center_shift_x=0.0, bb_center_shift_y=0.0, ring_type=None, ring_size=None, load_emotion_feature=False, nn_indices_array=None, nn_distances_array=None, ext=".png", use_processed = None, normalize_va = None, ): self.sample_list = sample_list self.image_path = image_path self.image_size = image_size self.use_gt_bb = use_gt_bb # self.transforms = transforms or imgaug.augmenters.Identity() self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.scale = scale
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emoca@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de """ warnings.filterwarnings('ignore') # def make_class_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_va_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_balanced_sample_by_weights(weights): # return WeightedRandomSampler(weights, len(weights)) def new_affewva(class_name): dataset_class = class_from_str(class_name, sys.modules[__name__]) return dataset_class class AfewVaDataModule(FaceDataModuleBase): def __init__(self, input_dir, output_dir, processed_subfolder = None, face_detector='fan', face_detector_threshold=0.9, image_size=224, scale=1.25, bb_center_shift_x=0., bb_center_shift_y=0., processed_ext=".png", device=None, augmentation=None, train_batch_size=64, val_batch_size=64, test_batch_size=64, num_workers=0, ring_type=None, ring_size=None, drop_last=False, sampler=None, split_seed=0, train_fraction=0.6, val_fraction=0.2, test_fraction=0.2, k_fold_crossvalidation=None, k_index=None, dataset_type=None, ): super().__init__(input_dir, output_dir, processed_subfolder, face_detector=face_detector, face_detector_threshold=face_detector_threshold, image_size=image_size, bb_center_shift_x=bb_center_shift_x, bb_center_shift_y=bb_center_shift_y, scale=scale, processed_ext=processed_ext, device=device) self.dataset_type = dataset_type or "AfewVa" # # self.subsets = sorted([f.name for f in (Path(input_dir) / "Manually_Annotated" / "Manually_Annotated_Images").glob("*") if f.is_dir()]) # self.input_dir = Path(self.root_dir) / "Manually_Annotated" / "Manually_Annotated_Images" # train = pd.read_csv(self.input_dir.parent / "training.csv") # val = pd.read_csv(self.input_dir.parent / "validation.csv") # self.df = pd.concat([train, val], ignore_index=True, sort=False) self.face_detector_type = 'fan' self.scale = scale self.use_processed = False if not (Path(self.output_dir) / "gt.pkl").exists(): video_list = sorted([p for p in Path(input_dir).glob("*") if p.is_dir()]) video_gts = OrderedDict() for iv, vp in enumerate(auto.tqdm(video_list)): video_gts[vp.stem] = Munch( json.load(open(vp / (vp.stem + ".json"), "r"))) with open(Path(self.output_dir) / "gt.pkl", "wb") as f: pkl.dump(video_gts, f) else: with open(Path(self.output_dir) / "gt.pkl", "rb") as f: video_gts = pkl.load(f) if self.use_processed: self.image_path = Path(self.output_dir) / "detections" else: self.image_path = Path(input_dir) self.seed = split_seed np.random.seed(self.seed) indices = np.arange(len(video_gts), dtype=np.int32) + 1 np.random.shuffle(indices) if k_fold_crossvalidation is not None: training_indices = [] 
validation_indices = [] for k in range(k_fold_crossvalidation): start_i = (k * len(indices)) // k_fold_crossvalidation end_i = ((k + 1) * len(indices)) // k_fold_crossvalidation training_indices += [np.concatenate([indices[0:(start_i)], indices[end_i:]])] validation_indices += [indices[start_i:end_i]] self.train_indices = training_indices[k_index] self.val_indices = validation_indices[k_index] self.test_indices = np.copy(validation_indices[k_index]) else: self.train_fraction = train_fraction self.val_fraction = val_fraction self.test_fraction = test_fraction assert self.train_fraction + self.val_fraction + self.test_fraction == 1.0 train_end = int(len(indices) * self.train_fraction) val_end = int(len(indices) * ( self.train_fraction + self.val_fraction)) self.train_indices = indices[:train_end] self.val_indices = indices[train_end:val_end] self.test_indices = indices[val_end:] # iterate over the training indices and create a list of the corresponding video names self.train_list = OrderedDict() self.val_list = OrderedDict() self.test_list = OrderedDict() for tr_i in self.train_indices: self.train_list[f"{tr_i:03d}"] = video_gts[f"{tr_i:03d}"] for v_i in self.val_indices: self.val_list[f"{v_i:03d}"] = video_gts[f"{v_i:03d}"] for t_i in self.test_indices: self.test_list[f"{t_i:03d}"] = video_gts[f"{t_i:03d}"] # self.ignore_invalid = ignore_invalid self.train_batch_size = train_batch_size self.val_batch_size = val_batch_size self.test_batch_size = test_batch_size self.num_workers = num_workers self.augmentation = augmentation self.sampler = sampler or "uniform" if self.sampler not in ["uniform", "balanced_videos", "balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise ValueError(f"Invalid sampler type: '{self.sampler}'") if self.sampler in ["balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise NotImplementedError() if ring_type not in [None, "gt_va", "augment"]: raise ValueError(f"Invalid ring type: '{ring_type}'") if ring_type == "gt_va": raise NotImplementedError() self.ring_type = ring_type self.ring_size = ring_size self.drop_last = drop_last @property def subset_size(self): return 1000 # @property # def num_subsets(self): # num_subsets = len(self.df) // self.subset_size # if len(self.df) % self.subset_size != 0: # num_subsets += 1 # return num_subsets def _detect_faces(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._detect_landmarks_and_segment_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _extract_emotion_features(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._extract_emotion_features_from_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _path_to_detections(self): return Path(self.output_dir) / "detections" def _path_to_segmentations(self): return Path(self.output_dir) / "segmentations" def _path_to_landmarks(self): return Path(self.output_dir) / "landmarks" def _path_to_emotions(self): return Path(self.output_dir) / "emotions" def _get_emotion_net(self, device): net = get_emonet() net = net.to(device) return net, "emo_net" def _extract_emotion_features_from_subset(self, start_i, end_i): self._path_to_emotions().mkdir(parents=True, exist_ok=True) print(f"Processing subset {start_i // self.subset_size}") image_file_list = [] for i in auto.tqdm(range(start_i, 
end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] in_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + ".png") if in_detection_fname.is_file(): image_file_list += [in_detection_fname] transforms = Compose([ Resize((256, 256)), ]) batch_size = 32 dataset = UnsupervisedImageDataset(image_file_list, image_transforms=transforms, im_read='pil') loader = DataLoader(dataset, batch_size=batch_size, num_workers=4, shuffle=False) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) net, emotion_type = self._get_emotion_net(device) for i, batch in enumerate(auto.tqdm(loader)): # facenet_pytorch expects this stanadrization for the input to the net # images = fixed_image_standardization(batch['image'].to(device)) images = batch['image'].cuda() # start = time.time() with torch.no_grad(): out = net(images, intermediate_features=True) # end = time.time() # print(f" Inference batch {i} took : {end - start}") emotion_features = {key : val.detach().cpu().numpy() for key, val in out.items()} # start = time.time() for j in range(images.size()[0]): image_path = batch['path'][j] out_emotion_folder = self._path_to_emotions() / Path(image_path).parent.name out_emotion_folder.mkdir(exist_ok=True, parents=True) emotion_path = out_emotion_folder / (Path(image_path).stem + ".pkl") emotion_feature_j = {key: val[j] for key, val in emotion_features.items()} del emotion_feature_j['emo_feat'] # too large to be stored per frame = (768, 64, 64) del emotion_feature_j['heatmap'] # not too large but probably not usefull = (68, 64, 64) # we are keeping emo_feat_2 (output of last conv layer (before FC) and then the outputs of the FCs - expression, valence and arousal) save_emotion(emotion_path, emotion_feature_j, emotion_type) def _detect_landmarks_and_segment_subset(self, start_i, end_i): self._path_to_detections().mkdir(parents=True, exist_ok=True) self._path_to_segmentations().mkdir(parents=True, exist_ok=True) self._path_to_landmarks().mkdir(parents=True, exist_ok=True) detection_fnames = [] out_segmentation_folders = [] status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) completed = status_array[start_i // self.subset_size] if not completed: print(f"Processing subset {start_i // self.subset_size}") for i in auto.tqdm(range(start_i, end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] left = self.df.loc[i]["face_x"] top = self.df.loc[i]["face_y"] right = left + self.df.loc[i]["face_width"] bottom = top + self.df.loc[i]["face_height"] bb = np.array([top, left, bottom, right]) im_fullfile = Path(self.input_dir) / im_file try: detection, _, _, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(im_fullfile, detected_faces=[bb]) except Exception as e: # except ValueError as e: print(f"Failed to load file:") print(f"{im_fullfile}") print(traceback.print_exc()) continue # except SyntaxError as e: # print(f"Failed to load file:") # print(f"{im_fullfile}") # print(traceback.print_exc()) # continue out_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + self.processed_ext) # detection_fnames += [out_detection_fname.relative_to(self.output_dir)] out_detection_fname.parent.mkdir(exist_ok=True) detection_fnames += [out_detection_fname] if self.processed_ext in [".jpg", ".JPG"]: imsave(out_detection_fname, detection[0], quality=100) else: imsave(out_detection_fname, detection[0]) # out_segmentation_folders += [self._path_to_segmentations() / 
Path(im_file).parent] # save landmarks out_landmark_fname = self._path_to_landmarks() / Path(im_file).parent / (Path(im_file).stem + ".pkl") out_landmark_fname.parent.mkdir(exist_ok=True) # landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)] save_landmark(out_landmark_fname, landmarks[0], bbox_type) self._segment_images(detection_fnames, self._path_to_segmentations(), path_depth=1) status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r+', shape=(self.num_subsets,) ) status_array[start_i // self.subset_size] = True status_array.flush() del status_array print(f"Processing subset {start_i // self.subset_size} finished") else: print(f"Subset {start_i // self.subset_size} is already processed") @property def status_array_path(self): return Path(self.output_dir) / "status.memmap" @property def is_processed(self): status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) all_processed = status_array.all() return all_processed def prepare_data(self): pass # if self.use_processed: # if not self.status_array_path.is_file(): # print(f"Status file does not exist. Creating '{self.status_array_path}'") # self.status_array_path.parent.mkdir(exist_ok=True, parents=True) # status_array = np.memmap(self.status_array_path, # dtype=np.bool, # mode='w+', # shape=(self.num_subsets,) # ) # status_array[...] = False # del status_array # # all_processed = self.is_processed # if not all_processed: # self._detect_faces() # # # if self.ring_type == "emonet_feature": # self._prepare_emotion_retrieval() def _new_training_set(self, for_training=True): if for_training: im_transforms_train = create_image_augmenter(self.image_size, self.augmentation) if self.ring_type == "emonet_feature": prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' self._load_retrieval_arrays(prefix, feature_label) nn_indices = self.nn_indices_array nn_distances = self.nn_distances_array else: nn_indices = None nn_distances = None return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, im_transforms_train, ring_type=self.ring_type, ring_size=self.ring_size, load_emotion_feature=False, nn_indices_array=nn_indices, nn_distances_array= nn_distances, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, load_emotion_feature=True, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def setup(self, stage=None): self.training_set = self._new_training_set() self.validation_set = new_affewva(self.dataset_type)(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = new_affewva(self.dataset_type)(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) # if self.mode in ['all', 'manual']: # # self.image_list += sorted(list((Path(self.path) / "Manually_Annotated").rglob(".jpg"))) # self.dataframe = pd.load_csv(self.path 
/ "Manually_Annotated" / "Manually_Annotated.csv") # if self.mode in ['all', 'automatic']: # # self.image_list += sorted(list((Path(self.path) / "Automatically_Annotated").rglob("*.jpg"))) # self.dataframe = pd.load_csv( # self.path / "Automatically_Annotated" / "Automatically_annotated_file_list.csv") def train_dataloader(self): if self.sampler == "uniform": sampler = None else: raise NotImplementedError() # elif self.sampler == "balanced_expr": # sampler = make_class_balanced_sampler(self.training_set.df["expression"].to_numpy()) # elif self.sampler == "balanced_va": # sampler = make_balanced_sample_by_weights(self.training_set.va_sample_weights) # elif self.sampler == "balanced_v": # sampler = make_balanced_sample_by_weights(self.training_set.v_sample_weights) # elif self.sampler == "balanced_a": # sampler = make_balanced_sample_by_weights(self.training_set.a_sample_weights) # else: # raise ValueError(f"Invalid sampler value: '{self.sampler}'") dl = DataLoader(self.training_set, shuffle=sampler is None, num_workers=self.num_workers, pin_memory=True, batch_size=self.train_batch_size, drop_last=self.drop_last, sampler=sampler) return dl def val_dataloader(self): return DataLoader(self.validation_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] def _get_retrieval_array(self, prefix, feature_label, dataset_size, feature_shape, feature_dtype, modifier='w+'): outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if outfile_name.is_file() and modifier != 'r': raise RuntimeError(f"The retrieval array already exists! 
'{outfile_name}'") shape = tuple([dataset_size] + list(feature_shape)) outfile_name.parent.mkdir(exist_ok=True, parents=True) array = np.memmap(outfile_name, dtype=feature_dtype, mode=modifier, shape=shape ) return array def _path_to_emotion_nn_indices_file(self, prefix, feature_label): nn_indices_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_indices.memmap") return nn_indices_file def _path_to_emotion_nn_distances_file(self, prefix, feature_label): nn_distances_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_distances.memmap") return nn_distances_file def _path_to_emotion_nn_retrieval_file(self, prefix, feature_label): outfile_name = Path(self.output_dir) / "cache" / (prefix + feature_label + ".memmap") return outfile_name def _load_retrieval_arrays(self, prefix, feature_label): # prefix = self.mode + "_train_" # if self.ignore_invalid: # prefix += "valid_only_" # feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) try: with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "rb") as f: indices_array_dtype = pkl.load(f) indices_array_shape = pkl.load(f) except: indices_array_dtype = np.int64, indices_array_shape = (len(dataset), NUM_NEIGHBORS) try: with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "rb") as f: distances_array_dtype = pkl.load(f) distances_array_shape = pkl.load(f) except: distances_array_dtype = np.float32, distances_array_shape = (len(dataset), NUM_NEIGHBORS) self.nn_indices_array = np.memmap(nn_indices_file, # dtype=np.int32, dtype=indices_array_dtype, mode="r", shape=indices_array_shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances_array_dtype, # dtype=np.float64, mode="r", shape=distances_array_shape ) def _prepare_emotion_retrieval(self): prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) NUM_NEIGHBORS = 100 if nn_indices_file.is_file() and nn_distances_file.is_file(): print("Precomputed nn arrays found.") return dataset = self._new_training_set(for_training=False) dl = DataLoader(dataset, shuffle=False, num_workers=self.num_workers, batch_size=self.train_batch_size) array = None if self.ring_type != "emonet_feature": raise ValueError(f"Invalid ring type for emotion retrieval {self.ring_type}") outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if not outfile_name.is_file(): for bi, batch in enumerate(auto.tqdm(dl)): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] if array is None: array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype) # for i in range(feat.shape[0]): # idx = bi*self.train_batch_size + i array[bi*self.train_batch_size:bi*self.train_batch_size + feat.shape[0], ...] 
= feat del array else: print(f"Feature array found in '{outfile_name}'") for bi, batch in enumerate(dl): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] break array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype, modifier='r') nbrs = NearestNeighbors(n_neighbors=30, algorithm='auto', n_jobs=-1).fit(array) distances, indices = nbrs.kneighbors(array, NUM_NEIGHBORS) indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="w+", shape=indices.shape ) indices_array[...] = indices del indices_array distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="w+", shape=distances.shape ) distances_array[...] = distances del distances_array # save sizes a dtypes with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(indices.dtype, f) pkl.dump(indices.shape, f) with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(distances.dtype, f) pkl.dump(distances.shape, f) self.nn_indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="r", shape=indices.shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="r", shape=distances.shape ) class AfewVaDataVisTestModule(AfewVaDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setup(self, stage=None): self.training_set = None self.validation_set = TestSubsetAfewVa(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = TestSubsetAfewVa(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def val_dataloader(self): return DataLoader(self.validation_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] class AfewVa(EmotionalImageDatasetBase): def __init__(self, image_path, sample_list, image_size, scale = 1.4, transforms : imgaug.augmenters.Augmenter = None, use_gt_bb=True, bb_center_shift_x=0.0, bb_center_shift_y=0.0, ring_type=None, ring_size=None, load_emotion_feature=False, nn_indices_array=None, nn_distances_array=None, ext=".png", use_processed = None, normalize_va = None, ): self.sample_list = sample_list self.image_path = image_path self.image_size = image_size self.use_gt_bb = use_gt_bb # self.transforms = transforms or imgaug.augmenters.Identity() self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.scale = scale
self.landmark_normalizer = KeypointNormalization()
5
2023-11-07 20:13:32+00:00
16k
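The train/validation split in the record above carves k folds out of a shuffled index array with integer arithmetic on the fold boundaries, so the folds differ in size by at most one element and together cover every index. A minimal runnable sketch of that logic, assuming NumPy indices (the helper name `kfold_split` is illustrative, not part of the module):

```python
import numpy as np

def kfold_split(indices: np.ndarray, k_fold: int, k_index: int):
    # Fold k spans [(k * n) // k_fold, ((k + 1) * n) // k_fold), exactly
    # as in the loop above; training gets everything outside that span.
    start_i = (k_index * len(indices)) // k_fold
    end_i = ((k_index + 1) * len(indices)) // k_fold
    val = indices[start_i:end_i]
    train = np.concatenate([indices[:start_i], indices[end_i:]])
    return train, val

indices = np.random.default_rng(0).permutation(30)
train_idx, val_idx = kfold_split(indices, k_fold=5, k_index=2)
assert len(train_idx) + len(val_idx) == len(indices)
assert not set(train_idx) & set(val_idx)
```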
hxz393/ConfigCenterComparer
ui/action_compare.py
[ { "identifier": "get_resource_path", "path": "lib/get_resource_path.py", "snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对...
import logging from typing import Dict, Optional, List from PyQt5.QtCore import QObject, pyqtSignal from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction from lib.get_resource_path import get_resource_path from lib.log_time import log_time from ui.config_manager import ConfigManager from ui.dialog_comparison import DialogComparison from ui.global_signals import global_signals from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
12,500
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2']) @log_time def compare(self) -> None: """ 执行数据对比操作。 该方法首先从表格获取原始数据,然后对数据进行重组和对比分析。最终,它将对比结果展示在对话框中。 :rtype: None :return: 无返回值。 """ try: # 获取原表格数据到字典。 original_data = self.table.get_table_data() if not original_data: logger.warning("No data available in the table for comparison.") message_show('Information', self.lang['ui.action_compare_4']) return # 对原表格数据进行重新整理分组。 new_data = self._reorganize_data(original_data) if not new_data: logger.error("Data reorganization failed.") self.status_updated.emit(self.lang['label_status_error']) return # 对整理分组后的数据进行对比。 result = self._compare_environments(new_data) if not result: logger.error("Environment comparison failed.") self.status_updated.emit(self.lang['label_status_error']) return # 打开带表格组件的对话框,展示结果。 self.dialog_comparison = DialogComparison(self.lang_manager, self.config_manager, result) self.dialog_comparison.status_updated.connect(self.forward_status) self.dialog_comparison.show() # 连接全局信号,主窗口关闭时一并关闭。
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2']) @log_time def compare(self) -> None: """ 执行数据对比操作。 该方法首先从表格获取原始数据,然后对数据进行重组和对比分析。最终,它将对比结果展示在对话框中。 :rtype: None :return: 无返回值。 """ try: # 获取原表格数据到字典。 original_data = self.table.get_table_data() if not original_data: logger.warning("No data available in the table for comparison.") message_show('Information', self.lang['ui.action_compare_4']) return # 对原表格数据进行重新整理分组。 new_data = self._reorganize_data(original_data) if not new_data: logger.error("Data reorganization failed.") self.status_updated.emit(self.lang['label_status_error']) return # 对整理分组后的数据进行对比。 result = self._compare_environments(new_data) if not result: logger.error("Environment comparison failed.") self.status_updated.emit(self.lang['label_status_error']) return # 打开带表格组件的对话框,展示结果。 self.dialog_comparison = DialogComparison(self.lang_manager, self.config_manager, result) self.dialog_comparison.status_updated.connect(self.forward_status) self.dialog_comparison.show() # 连接全局信号,主窗口关闭时一并关闭。
global_signals.close_all.connect(self.close_dialog)
4
2023-11-07 01:02:38+00:00
16k
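`initUI` above connects `QAction.triggered` through a lambda: the signal emits a `checked` bool, and routing it through `lambda checked=False: self.compare()` keeps that argument out of the `@log_time`-decorated slot so the run time is logged per call. A self-contained sketch of the same wiring (the `Window` class and its stub slot are hypothetical placeholders):

```python
import sys
from PyQt5.QtWidgets import QAction, QApplication, QMainWindow


class Window(QMainWindow):
    def __init__(self):
        super().__init__()
        action = QAction('Compare', self)
        action.setShortcut('F8')
        # The lambda swallows the `checked` argument emitted by triggered.
        action.triggered.connect(lambda checked=False: self.compare())
        self.addAction(action)

    def compare(self) -> None:
        print('comparing...')


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
```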
pytorch-labs/ao
test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetr...
import copy import unittest import torch import torch.nn as nn import os from torch._inductor.utils import run_and_get_code from torch._dynamo import config from torch.ao.quantization import MinMaxObserver, QConfigMapping from torchao.quantization.dynamic_quant import ( DynamicallyPerAxisQuantizedLinear, ) from torchao.quantization.quant_api import ( apply_dynamic_quant, apply_weight_only_int8_quant, change_linear_weights_to_int8_dqtensors, change_linear_weights_to_int8_woqtensors, change_linear_weights_to_int4_woqtensors, _replace_with_custom_fn_if_matches_filter, ) from torchao.quantization.quant_primitives import ( dequantize_per_channel, dequantize_per_tensor, dynamically_quantize_per_channel, dynamically_quantize_per_tensor, quant_int8_dynamic_linear, quant_int8_dynamic_per_token_linear, quantize_activation_per_token_absmax, safe_int_mm, ) from torchao.quantization.smoothquant import ( get_scale, smooth_fq_linear_to_inference, SmoothFakeDynamicallyQuantizedLinear, swap_linear_with_smooth_fq_linear, ) from torchao.quantization.subclass import ( Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, Int4WeightOnlyQuantizedLinearWeight ) from torchao.quantization.utils import ( _apply_logging_hook, compute_error, compute_error as SQNR, _fqn_to_op_to_shape_to_count, LoggingTensorMode, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx from transformers import ( # type: ignore[import-untyped] DistilBertModel, DistilBertTokenizer, )
11,283
x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater( SQNR(w, lin.weight.dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed dtype={test_dtype}" ) self.assertGreater( SQNR(w.t(), lin.weight.t().dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed transpose on dtype={test_dtype}" ) def test_dequantize_int8_dynamic_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl( Int8DynamicallyQuantizedLinearWeight.from_float, 35, test_dtype ) def test_dequantize_int8_weight_only_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl(
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # mypy: ignore-errors torch.manual_seed(0) config.cache_size_limit = 100 class SmoothquantUnitTest(unittest.TestCase): # first, let's reproduce the graphic from the paper, Figure 4, to ensure # we are calculating the scales correctly def test_figure_4(self): X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4) W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3) X_mul_W = torch.matmul(X, W) smoothquant_scale = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) # reproduce scaled calculation X_scaled = X / smoothquant_scale.reshape(1, 1, -1) W_scaled = torch.matmul(torch.diag(smoothquant_scale), W) X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled) assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!" assert X_mul_W.shape == X_scaled_mul_scaled_W.shape # next, run the above test on a sample of representative inputs def test_tensors(self): x_shape = (1, 5, 7) w_shape = (7, 9) for i in range(3): X = torch.randn(x_shape) * 10 W = torch.randn(w_shape) s = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) Y = torch.matmul(X, W) Y_ref = torch.matmul( X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W), ) assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!" def _test_smooth_linear_impl(self, x_shape, lin_shape, device): # so we can use the full range torch.backends.quantized.engine = "qnnpack" x = torch.randn(*x_shape, device=device) * 9 + 10 lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore activation=None, weight=torch.ao.quantization.default_per_channel_weight_observer, ) lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float( lin_fp32_copy.cpu() ) y_ref = lin_fp32(x) # calibrate the smoothquant versions y_smooth_nocalib = lin_smooth(x) _ = lin_smooth_skip_scaling(x) lin_smooth.to_inference() lin_smooth_skip_scaling.debug_skip_scaling = True lin_smooth_skip_scaling.to_inference() # verify that with scaling turned off, numerics match quantized version y_smooth_fq_only = lin_smooth_skip_scaling(x) y_smooth_fq = lin_smooth(x) y_dynamic_q = lin_dynamic_q(x.cpu()).to(device) # print('y_ref', y_ref) # print('y_smooth_nocalib', y_smooth_nocalib) # print('y_smooth_fq', y_smooth_fq) # print('y_smooth_fq_only', y_smooth_fq_only) # print('y_dynamic_q', y_dynamic_q) sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq) sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q) sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q) # print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq) assert torch.allclose( y_ref, y_smooth_nocalib ), "y_ref not close to y_smooth_nocalib" # after https://github.com/pytorch-labs/ao_benchmarks/pull/32, # numerics do not match exactly between production c++ code # and this Python code # assert torch.allclose( # y_smooth_fq_only, y_dynamic_q, # atol=torch.max(y_smooth_fq_only).item()*0.01, # rtol=0.00001), \ # 
'y_smooth_fq_only not close to y_dynamic_q' self.assertTrue(sqnr_smooth_fq.item() >= 40.0) self.assertTrue(sqnr_dynamic_q.item() >= 40.0) self.assertTrue(sqnr_fq.item() >= 40.0) def test_smooth_linear_cpu(self): self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu") def test_smooth_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda") def test_smooth_linear_edge_cases(self): # so we can use the full range torch.backends.quantized.engine = "qnnpack" lin_fp32 = nn.Linear(3, 4) lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( lin_fp32, alpha=0.25 ) # test different ranks x0 = torch.randn(4, 5, 3) x1 = torch.randn(1, 8, 5, 3) x2 = torch.randn(2, 3, 7, 5, 3) # calibrate _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) # inference lin_smooth.to_inference() _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) def test_swap(self): m = nn.Sequential( nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)), nn.Linear(4, 4), ) m_copy = copy.deepcopy(m) swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) # verify all linears are swapped assert isinstance(m_copy[0][0], SmoothFakeDynamicallyQuantizedLinear) assert isinstance(m_copy[0][1], nn.ReLU) # this one was skipped assert isinstance(m_copy[0][2], nn.Linear) assert isinstance(m_copy[1], SmoothFakeDynamicallyQuantizedLinear) # verify results do not change without smoothing x = torch.randn(4, 4) y_ref = m(x) y = m_copy(x) assert torch.allclose(y_ref, y) def test_weight_t_and_non_t_numerics_match(self): # verify that numerics match whether weight is stored # in transposed format (for cuBLAS) vs non-transposed format # (for torch.compile) if not torch.cuda.is_available(): print("no cuda, skip") return dtype = torch.half device = "cuda" lin_ref = nn.Linear(32, 16, dtype=dtype, device=device) lin_eager_t = copy.deepcopy(lin_ref) lin_opt_t = copy.deepcopy(lin_eager_t) lin_opt = copy.deepcopy(lin_eager_t) lin_eager_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_eager_t) lin_opt_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt_t) lin_opt = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt) lin_opt.store_w_int_repr_t = False x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) 
self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again x_dq = dequantize_per_tensor(y_vals, y_scale, y_zero_point, float_dtype) y_ref_dq = y_ref.dequantize().to(float_dtype) if float_dtype == torch.float: torch.testing.assert_close(x_dq, y_ref_dq) else: sqnr = compute_error(x_dq, y_ref_dq) self.assertTrue(sqnr.item() > 45.0) def test_dynamic_quant_per_tensor_numerics_cpu(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu test_cases = ( ( 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def 
test_dynamic_quant_per_tensor_numerics_cuda(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def _test_dynamic_quant_per_channel_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): # verifies that dynamic quant per channel in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu x = torch.randn(16, 32, device=device, dtype=float_dtype) y_vals, y_scale, y_zero_point = dynamically_quantize_per_channel( x, qmin, qmax, int_dtype ) min_val, max_val = torch.aminmax(x, dim=1) # reference weight_obs = torch.ao.quantization.MovingAveragePerChannelMinMaxObserver( dtype=qint_dtype, quant_min=qmin, quant_max=qmax, qscheme=torch.per_channel_symmetric, averaging_constant=1.0, # make it ignore previous iterations ) weight_obs(x) y_ref_scale, y_ref_zp = weight_obs.calculate_qparams() y_ref_scale = y_ref_scale.to(device) y_ref_zp = y_ref_zp.to(device) # quantize_per_channel doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x y_ref = torch.quantize_per_channel( x_for_ref, y_ref_scale, y_ref_zp, 0, qint_dtype ) torch.testing.assert_close( y_scale, y_ref.q_per_channel_scales().to(float_dtype) ) assert torch.equal(y_zero_point, y_ref.q_per_channel_zero_points()) # this test case has one element where the rounding is off by one # from Python-only code vs the c++ code, it's easy to repro with # various shapes. 
# Discussion here is relevant: https://github.com/pytorch/pytorch/issues/16498 # TODO(future): figure out what to do about this # assert torch.equal(int_vals, q_reference.int_repr()) assert torch.max(torch.abs(y_vals - y_ref.int_repr())) <= 1 # dequantize x_dq = dequantize_per_channel(y_vals, y_scale, y_zero_point) x_ref_dq = y_ref.dequantize() # off-by-one for scale is okay torch.testing.assert_close( x_dq, x_ref_dq, atol=torch.max(y_scale).item() * 1.01, rtol=0.0001 ) def test_dynamic_quant_per_channel_numerics_cpu(self): test_cases = ((-128, 127, torch.int8, torch.qint8, torch.float32, "cpu"),) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def test_dynamic_quant_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( (-128, 127, torch.int8, torch.qint8, torch.float32, "cuda"), (-128, 127, torch.int8, torch.qint8, torch.float16, "cuda"), ) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def _test_quantize_per_token_impl(self, device, dtype): x = torch.randn(3, 3, 3, device=device, dtype=dtype) xq, scales = quantize_activation_per_token_absmax(x) x_dq = dequantize_per_tensor(xq, scales, None).to(x.dtype) sqnr = compute_error(x, x_dq) self.assertTrue(sqnr >= 45.0) def test_quantize_per_token_cpu(self): for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cpu", dtype) def test_quantize_per_token_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cuda", dtype) def _test_per_token_linear_impl(self, device, dtype): x = torch.randn(2, 16, 8, device=device, dtype=dtype) w = torch.randn(16, 8, device=device, dtype=dtype) wq, w_scales, _w_zp = dynamically_quantize_per_channel(w, -127, 127, torch.int8) # Note: need to make the weight contiguous because we are # testing in eager mode and cuBlas will not give correct results # for a transposed weight y = quant_int8_dynamic_per_token_linear( x, wq.t().contiguous(), w_scales, None, dtype ) y_ref = torch.matmul(x, w.t()) sqnr = compute_error(y_ref, y) self.assertTrue(sqnr >= 42.0) def test_per_token_linear_cpu(self): for dtype in (torch.float32,): self._test_per_token_linear_impl("cpu", dtype) def test_per_token_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_per_token_linear_impl("cuda", dtype) def test__int_mm(self): # TODO(future): figure out what here needs to move to PT core, # if it's not already tested there if not torch.cuda.is_available(): print("no cuda, skip") return m, k, n = 32, 32, 16 x = torch.randint(-128, 127, (m, k), dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, (k, n), dtype=torch.int8, device="cuda") y_ref = torch.matmul(x.float(), w.float()).to(torch.int32) y_raw = safe_int_mm(x, w) wrap_in_mm_opt = torch.compile(safe_int_mm, mode="max-autotune") # note: triton chokes on the line below on k == 8 and n == 8 with # https://www.internalfb.com/phabricator/paste/view/P683467944 # TODO(future): file an issue y_opt = wrap_in_mm_opt(x, w) torch.testing.assert_close(y_ref, y_raw, atol=0, rtol=0) torch.testing.assert_close(y_ref, y_opt, atol=0, rtol=0) def test__int_mm_eager_and_torch_compile_numerics(self): if not torch.cuda.is_available(): print("no cuda, skip") return def __int_mm_ref(x, w): x = x.cpu().to(torch.int32) w = 
w.cpu().to(torch.int32) y = torch.matmul(x, w) return y.cuda() shapes = ( # minimal test shape ((1, 32, 32), (32, 16)), # paste of real linear shapes from LLaMa 1.5b ((17, 1, 1536), (1536, 1536)), ((17, 8, 4096), (4096, 1536)), ((17, 1, 1536), (1536, 4096)), ((17, 8, 1536), (1536, 1536)), ((17, 1, 4096), (4096, 1536)), ((17, 8, 1536), (1536, 4096)), ) for x_shape, w_shape in shapes: def wrap_torch_int_mm(x, w): b, n, k = x.shape k, m = w.shape x = x.reshape(b * n, k) res = safe_int_mm(x, w) res = res.reshape(b, n, m) return res wrap_torch_int_mm_opt = torch.compile( wrap_torch_int_mm, mode="max-autotune" ) x = torch.randint(-128, 127, x_shape, dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, w_shape, dtype=torch.int8, device="cuda") z_ref = __int_mm_ref(x, w) z_eager = wrap_torch_int_mm(x, w) z_torch_compile = wrap_torch_int_mm_opt(x, w) # print(z_ref) # print(z_eager) # print(z_torch_compile) torch.testing.assert_close(z_ref, z_eager, atol=0, rtol=0) torch.testing.assert_close(z_ref, z_torch_compile, atol=0, rtol=0) def _test_qlinear_per_channel_numerics( self, x_shape, lin_shape, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): qconfig = torch.ao.quantization.per_channel_dynamic_qconfig x = torch.randn(*x_shape, device=device, dtype=float_dtype) # TODO: test bias true and false # Note: reference path only works on float because lack of aten quant primitives # support of half, so we cast back and forth to emulate lin_ref = ( nn.Sequential(nn.Linear(*lin_shape)) .eval() .to(float_dtype) .float() .to(device) ) y_ref = lin_ref(x.float()) weight = lin_ref[0].weight bias = lin_ref[0].bias qconfig_mapping = QConfigMapping().set_global(qconfig) lin_ref_p = prepare_fx(lin_ref, qconfig_mapping, (torch.randn(1, 1),)) lin_ref_q = convert_to_reference_fx(lin_ref_p) y_q_ref = lin_ref_q(x.float()) # scale, zp of weight (get from reference model) w_obs = qconfig.weight() w_obs(weight) lin_ref_w_scale, lin_ref_w_zp = w_obs.calculate_qparams() lin_ref_w_scale = lin_ref_w_scale.to(device).to(float_dtype) # print('lin_ref_w', 'scale', lin_ref_w_scale, 'zp', lin_ref_w_zp) w_vals, _s, _z = dynamically_quantize_per_channel( getattr(lin_ref_q, "0").weight.to(float_dtype), -128, 127, torch.int8 ) w_vals = w_vals.t().contiguous() w_vals_sums = w_vals.sum(dim=0) # do our version of the quantized linear operator y = quant_int8_dynamic_linear( x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. 
test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater( SQNR(w, lin.weight.dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed dtype={test_dtype}" ) self.assertGreater( SQNR(w.t(), lin.weight.t().dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed transpose on dtype={test_dtype}" ) def test_dequantize_int8_dynamic_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl( Int8DynamicallyQuantizedLinearWeight.from_float, 35, test_dtype ) def test_dequantize_int8_weight_only_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl(
Int8WeightOnlyQuantizedLinearWeight.from_float, 35, test_dtype
20
2023-11-03 21:27:36+00:00
16k
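The tests above gate correctness on SQNR thresholds (`>= 37.5`, `>= 40.0`, `>= 45.0` dB). A small sketch of one common SQNR definition in decibels, consistent with how `compute_error` is used in these tests; the exact torchao formula is assumed, not quoted:

```python
import torch

def sqnr(ref: torch.Tensor, test: torch.Tensor) -> torch.Tensor:
    # Signal-to-quantization-noise ratio in dB: 20 * log10(|ref| / |ref - test|).
    # Higher is better; ~40 dB means the error norm is ~1% of the signal norm.
    return 20 * torch.log10(torch.linalg.norm(ref) / torch.linalg.norm(ref - test))

x = torch.randn(256)
scale = x.abs().max() / 127                         # symmetric int8 scale
x_dq = torch.clamp((x / scale).round(), -128, 127) * scale
print(f"int8 round-trip SQNR: {sqnr(x, x_dq).item():.1f} dB")
```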
codefuse-ai/Collinear-Constrained-Attention
train/trainer/atorch_trainer.py
[ { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size(...
import datetime import json import logging import math import os import random import re import shutil import time import warnings import gc import numpy as np import atorch import torch from functools import partial from pathlib import Path from deepspeed.ops.adam import DeepSpeedCPUAdam from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm.auto import tqdm from transformers import get_scheduler as get_scheduler_trans from transformers.modeling_utils import PreTrainedModel, unwrap_model from transformers.trainer import ( OPTIMIZER_NAME, SCHEDULER_NAME, TRAINER_STATE_NAME, TRAINING_ARGS_NAME ) from transformers.trainer_pt_utils import reissue_pt_warnings from transformers.trainer_utils import ( PREFIX_CHECKPOINT_DIR, ) from transformers.utils import WEIGHTS_NAME from torch.nn import CrossEntropyLoss from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus from atorch.auto import auto_accelerate from atorch.utils.version import torch_version from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP from model.glm.modeling_glm import GLMBlock from torch.cuda.amp import GradScaler from apex.optimizers import FusedSGD from model.peft.modeling_peft import PeftModel
11,705
self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." 
) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP)
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. HYPER_PARAMETER_NAME = 'hyper_parameters.json' ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin' EPOCH_CHECKPOINT_NAME = 'epoch' FAMO_CHECKPOINT_NAME = 'famo_checkpoint' EMA_CHECKPOINT_NAME = 'ema_checkpoint' # logger = logging.getLogger(__name__) def is_local_main_process(): return atorch.local_rank() == 0 def is_global_main_process(): return atorch.rank() == 0 def has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def count_model_params(model): trainable_params = 0 all_params = 0 for param in model.parameters(): num_params = param.numel() all_params += num_params if param.requires_grad: trainable_params += num_params return all_params, trainable_params class AtorchArguments: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): def lr_lambda(current_step: int): inverse_log_warm_up = 1.0 / math.log(num_warmup_steps) if current_step == 0: return 0.0 if current_step < num_warmup_steps: return inverse_log_warm_up * math.log(current_step) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps): scheduler_map = { 'log_warmup_linear_decay': get_linear_schedule_with_log_warmup} try: lr_scheduler = get_scheduler_trans( name, optimizer, num_warmup_steps, num_training_steps) return lr_scheduler except Exception: schedule_func = scheduler_map[name] return schedule_func(optimizer, num_warmup_steps, num_training_steps) class AtorchTrainer: def __init__(self, model, args, train_dataset, valid_dataset, tokenizer=None, callbacks=None, no_save_atorch_checkpoint=None, save_pytorch_model_bin_checkpoint=True, train_peft=False, rank=0, max_shard_size='10GB', files_to_save=None, args_to_save=None, data_collator=None, my_loss_func=None, **kwargs, ): self.args = args self.TASK2ID = TASK2ID self.ID2TASK = ID2TASK print('in atorch trainer') print(TASK2ID) print(ID2TASK) self.model = model self.no_save_atorch_checkpoint = no_save_atorch_checkpoint self.save_pytorch_model_bin_checkpoint = 
save_pytorch_model_bin_checkpoint self.train_peft = train_peft self.rank = rank self.kwargs = kwargs self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.tokenizer = tokenizer self.max_shard_size = max_shard_size self.files_to_save = files_to_save self.args_to_save = args_to_save self.best_metric = None self.best_model_checkpoint = None self.no_save_base_model = True self.device = f"cuda:{atorch.local_rank()}" self.famo = FAMO(n_tasks=len(TASK2ID), device=self.device, mode=self.args.weighted_loss_mode) self.famo_resume = False self.selfpaced_status = SelfPacedStatus(args.selfpaced_interval) self.total_train_batch_size = self.args.per_device_train_batch_size * \ self.args.gradient_accumulation_steps * \ atorch.world_size() self.data_collator = data_collator self.my_loss_func = my_loss_func if self.args.early_stopping_patience > 0: print(f'early_stopping_patience: {self.args.early_stopping_patience}') patience = self.args.early_stopping_patience self.early_stopping = EarlyStopping(patience, verbose=True) self.train_dataloader_args = { "shuffle": True, "batch_size": self.total_train_batch_size, "pin_memory": True, "collate_fn": data_collator, "drop_last": True, "num_workers": self.args.num_workers, # "persistent_workers": args.num_workers > 0, } self.valid_dataloader = DataLoader( valid_dataset, sampler=DistributedSampler(valid_dataset, shuffle=True), batch_size=args.per_device_valid_batch_size, pin_memory=True, collate_fn=data_collator ) self.valid_dataloader_length = len(self.valid_dataloader) if self.args.resume_from_checkpoint == 'true': self.resume_checkpoint_dir = self.get_last_checkpoint( self.args.output_dir) self.atorch_args = AtorchArguments( lr=args.learning_rate, weight_decay=args.weight_decay, adam_eps=args.adam_epsilon, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2) self.atorch_init() self.num_update_steps_per_epoch = math.ceil( len(self.train_dataloader) / self.args.gradient_accumulation_steps) print(f'number of update steps per epoch: {self.num_update_steps_per_epoch}') if self.args.max_steps == -1: self.args.max_steps = int( self.args.num_train_epochs * self.num_update_steps_per_epoch) else: self.args.num_train_epochs = math.ceil( self.args.max_steps / self.num_update_steps_per_epoch) # self.args.warmup_steps = self.args.get_warmup_steps( # self.args.max_steps) # 找不到get_warmup_steps custom_lr_scheduler_type = self.kwargs.get( 'custom_lr_scheduler_type', None) self.lr_scheduler = get_scheduler( name=custom_lr_scheduler_type if custom_lr_scheduler_type else self.args.lr_scheduler_type, optimizer=self.optimizer, num_warmup_steps=self.args.num_warmup_steps, num_training_steps=self.args.max_steps, ) print_rank_0(f'lr_scheduler{self.lr_scheduler}') if self.args.resume_from_checkpoint == 'true': with warnings.catch_warnings(record=True): self.lr_scheduler.load_state_dict(torch.load( os.path.join(self.resume_checkpoint_dir, SCHEDULER_NAME))) self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and 
os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rank}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # after rank 0 finishes loading, distribute the parameters via sync_module_states torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'cannot find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') 
print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP)
wrap_class = (LlamaDecoderLayer,)
7
2023-11-02 01:37:01+00:00
16k
bytedance/cryostar
projects/star/train_density.py
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\"...
import os import os.path as osp import einops import lightning.pytorch as pl import numpy as np import torch from lightning.pytorch.strategies import DDPStrategy from lightning.pytorch.utilities import rank_zero_only from torch.utils.data import DataLoader from tqdm import tqdm from mmengine import mkdir_or_exist from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig from cryostar.nerf.volume_utils import ImplicitFourierVolume from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d) from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict) from cryostar.utils.losses import calc_kl_loss from cryostar.utils.ml_modules import VAEEncoder, reparameterize from cryostar.utils.mrc_tools import save_mrc from miscs import infer_ctf_params_from_config
11,009
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = 
self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input) z = reparameterize(mu, log_var)
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = 
self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input) z = reparameterize(mu, log_var)
kld_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits)
13
2023-11-06 07:15:26+00:00
16k
UMass-Foundation-Model/CoVLM
transformers/src/transformers/models/unispeech/configuration_unispeech.py
[ { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for lo...
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging
12,443
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UniSpeech model configuration""" logger = logging.get_logger(__name__) UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech }
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UniSpeech model configuration""" logger = logging.get_logger(__name__) UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech }
class UniSpeechConfig(PretrainedConfig):
0
2023-11-07 04:23:57+00:00
16k
HKU-BAL/ClairS-TO
src/realign_reads.py
[ { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "BASIC_BASES = set(\"ACGTU\")\nWARNING = '\\033[93m'\nERROR = '\\033[91m'\nENDC = '\\033[0m'\ndef log_error(log):\ndef log_warning(log):\ndef is_file_exists(file_name, suffix=\"\"):\ndef is_folder_exists(folder_name, suffix=\"\"):...
import sys import os import shlex import ctypes import re import subprocess import shared.param as param from subprocess import PIPE from argparse import ArgumentParser, SUPPRESS from collections import defaultdict from shared.utils import subprocess_popen, reference_sequence_from, IUPAC_base_to_ACGT_base_dict as BASE2ACGT, log_error from shared.interval_tree import bed_tree_from from shared.intervaltree.intervaltree import IntervalTree
12,982
class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = TLEN self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base):
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. realign_chunk_size = 5000 min_dbg_mapping_quality = min_dbg_base_quality = 20 region_expansion_in_bp = expand_align_ref_region = 20 min_windows_distance = expand_align_ref_region * 4 max_window_size = max_region_reads_num = 1000 expandReferenceRegion = 100000 realigner_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/realigner',))) dbg_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/debruijn_graph',))) if not os.path.exists(realigner_mod) or not os.path.exists(dbg_mod): # try to find modules in clair3 python_path = subprocess.run('which python', stdout=subprocess.PIPE, shell=True).stdout.decode().rstrip() conda_prefix = os.path.dirname(os.path.dirname(python_path)) clair3_realign_path = os.path.join(conda_prefix, 'bin', 'preprocess', 'realign') clair3_realigner_mod = os.path.join(clair3_realign_path, 'realigner') clair3_dbg_mod = os.path.join(clair3_realign_path, 'debruijn_graph') if os.path.exists(clair3_realigner_mod) and os.path.exists(clair3_dbg_mod): realigner_mod = clair3_realigner_mod dbg_mod = clair3_dbg_mod else: print(log_error("[ERROR] `realigner` or `debruijn_graph` submodule not found in conda environment, pls install clair3-illumina package!")) sys.exit(1) realigner = ctypes.cdll.LoadLibrary(realigner_mod) dbg = ctypes.cdll.LoadLibrary(dbg_mod) class StructPointer(ctypes.Structure): _fields_ = [("position", ctypes.c_int * max_region_reads_num), ("cigar_string", ctypes.c_char_p * max_region_reads_num), ] class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, 
y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = TLEN self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base):
return base if base == "N" else BASE2ACGT[base]
3
2023-11-07 04:39:16+00:00
16k
the-siesta-group/edfio
tests/test_edfplus_header.py
[ { "identifier": "AnonymizedDateError", "path": "edfio/edf.py", "snippet": "class AnonymizedDateError(ValueError):\n \"\"\"Raised when trying to access an anonymized startdate or birthdate.\"\"\"" }, { "identifier": "Edf", "path": "edfio/edf.py", "snippet": "class Edf:\n \"\"\"Pytho...
import datetime import numpy as np import pytest from edfio import AnonymizedDateError, Edf, EdfSignal, Patient, Recording
13,638
@pytest.fixture() def patient(): return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry") @pytest.fixture() def recording():
@pytest.fixture() def patient(): return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry") @pytest.fixture() def recording():
return Recording._from_str(
4
2023-11-09 09:53:27+00:00
16k
sb-ai-lab/HypEx
hypex/matcher.py
[ { "identifier": "FaissMatcher", "path": "hypex/algorithms/faiss_matcher.py", "snippet": "class FaissMatcher:\n \"\"\"A class used to match instances using Faiss library.\"\"\"\n\n def __init__(\n self,\n df: pd.DataFrame,\n outcomes: str,\n treatment: st...
import logging import pickle import numpy as np import pandas as pd from typing import Union from tqdm.auto import tqdm from .algorithms.faiss_matcher import FaissMatcher from .algorithms.no_replacement_matching import MatcherNoReplacement from .selectors.feature_selector import FeatureSelector from .selectors.spearman_filter import SpearmanFilter from .selectors.outliers_filter import OutliersFilter from .selectors.base_filtration import const_filtration, nan_filtration from .utils.validators import random_feature from .utils.validators import random_treatment from .utils.validators import subset_refuter from .utils.validators import test_significance
12,777
def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If True, log at debug level; otherwise log at info level """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be dropped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature": self.input_data, self.validate = random_feature(self.input_data) if self.features_importance is not None and i == 0: self.features_importance.append("random_feature") self.matcher = FaissMatcher( self.input_data, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) elif refuter == "subset_refuter": df, self.validate = subset_refuter(self.input_data, self.treatment, fraction) self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) else: logger.error("Incorrect refuter name") raise NameError( "Incorrect refuter name! Available refuters: `random_feature`, `random_treatment`, `subset_refuter`" ) if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) for outcome in self.outcomes: self.pval_dict.update({outcome: [np.mean(self.val_dict[outcome])]}) self.pval_dict[outcome].append(
"""Base Matcher class.""" REPORT_FEAT_SELECT_DIR = "report_feature_selector" REPORT_PROP_MATCHER_DIR = "report_matcher" NAME_REPORT = "lama_interactive_report.html" N_THREADS = 1 N_FOLDS = 4 RANDOM_STATE = 123 TEST_SIZE = 0.2 TIMEOUT = 600 VERBOSE = 2 USE_ALGOS = ["lgb"] PROP_SCORES_COLUMN = "prop_scores" GENERATE_REPORT = True SAME_TARGET_THRESHOLD = 0.7 OUT_INTER_COEFF = 1.5 OUT_MODE_PERCENT = True OUT_MIN_PERCENT = 0.02 OUT_MAX_PERCENT = 0.98 logger = logging.getLogger("hypex") console_out = logging.StreamHandler() logging.basicConfig( handlers=(console_out,), format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s", datefmt="%d.%m.%Y %H:%M:%S", level=logging.INFO, ) class Matcher: """Class for compile full pipeline of Matching in Causal Inference task. Matcher steps: - Read, analyze data - Feature selection via LightAutoML - Converting a dataset with features to another space via Cholesky decomposition In the new space, the distance L2 becomes equivalent to the Mahalanobis distance. This allows us to use faiss to search for nearest objects, which can search only by L2 metric, but without violating the methodology of matching, for which it is important to count by the Mahalanobis distance - Finding the nearest neighbors for each unit (with duplicates) using faiss. For each of the control group, neighbors from the target group are matched and vice versa. - Calculation bias - Creating matched df (Wide df with pairs) - Calculation metrics: ATE, ATT, ATC, p-value, and сonfidence intervals - Calculation quality: PS-test, KS test, SMD test - Returns metrics as dataframe, quality results as dict of df's and df_matched - After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result` Example: Common usecase - base pipeline for matching >>> # Base info >>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1 >>> target = "target" # Column name with target >>> >>> # Optional >>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative. >>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature) >>> >>> # Matching >>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col) >>> features = model.lama_feature_select() # Feature selection via lama >>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching >>> >>> model.validate_result() """ def __init__( self, input_data: pd.DataFrame, treatment: str, outcome: Union[str, list] = None, outcome_type: str = "numeric", group_col: str = None, info_col: list = None, weights: dict = None, base_filtration: bool = False, generate_report: bool = GENERATE_REPORT, report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR, timeout: int = TIMEOUT, n_threads: int = N_THREADS, n_folds: int = N_FOLDS, verbose: bool = VERBOSE, use_algos: list = None, same_target_threshold: float = SAME_TARGET_THRESHOLD, interquartile_coeff: float = OUT_INTER_COEFF, drop_outliers_by_percentile: bool = OUT_MODE_PERCENT, min_percentile: float = OUT_MIN_PERCENT, max_percentile: float = OUT_MAX_PERCENT, n_neighbors: int = 1, silent: bool = True, pbar: bool = True, ): """Initialize the Matcher object. Args: input_data: Input dataframe outcome: Target column treatment: Column determine control and test groups outcome_type: Values type of target column. Defaults to "numeric" group_col: Column for grouping. Defaults to None. 
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Time limit for LAMA, in seconds. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Defaults to 0.7 interquartile_coeff: Percent for drop outliers. Defaults to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more than n matches as every match may have more than one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while getting index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration: filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = np.concatenate( ( self.dropped_features, [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop], ) ) self.input_data = self.input_data[filtered_features + columns_to_drop] self._log("Categorical features turned into dummy") def _apply_filter(self, filter_class, *filter_args): """Applies a filter to the input data. Args: filter_class: The class of the filter to apply. *filter_args: Arguments to pass to the filter class. """ filter_instance = filter_class(*filter_args) self.input_data = filter_instance.perform_filter(self.input_data) def _spearman_filter(self): """Applies a filter by dropping columns correlated with the outcome column. This method uses the Spearman filter to eliminate features from the dataset that are highly correlated with the outcome columns, based on a pre-set threshold """ self._log("Applying filter by spearman test - drop columns correlated with outcome") self._apply_filter(SpearmanFilter, self.outcomes[0], self.treatment, self.same_target_threshold) def outliers_filter(self): """Removes outlier values from the dataset. This method employs an OutliersFilter. If `drop_outliers_by_percentile` is True, it retains only the values between the min and max percentiles If `drop_outliers_by_percentile` is False, it retains only the values between 2nd and 98th percentiles """ self._log( f"Applying filter of outliers\n" f"interquartile_coeff={self.interquartile_coeff}\n" f"mode_percentile={self.mode_percentile}\n" f"min_percentile={self.min_percentile}\n" f"max_percentile={self.max_percentile}" ) self._apply_filter( OutliersFilter, self.interquartile_coeff, self.mode_percentile, self.min_percentile, self.max_percentile ) def match_no_rep(self, threshold: float = 0.1, approximate_match: bool = False) -> pd.DataFrame: """Matching groups with no replacement. 
It's done by optimizing the linear sum of distances between pairs of treatment and control samples. Args: threshold: caliper for minimum deviation between test and control groups. in case weights is not None. approximate_match: use or not approximate matching Returns: Matched dataframe with no replacements. """ a = self.input_data[self.treatment] X = self.input_data.drop(columns=self.treatment) if self.info_col is not None: X = X.drop(columns=self.info_col) index_matched = MatcherNoReplacement(X, a, self.weights, approximate_match).match() filtred_matches = index_matched.loc[1].iloc[self.input_data[a == 1].index].matches[index_matched.loc[1].iloc[self.input_data[a == 1].index].matches.apply(lambda x: x != [])] if self.weights is not None: weighted_features = [f for f in self.weights.keys()] index_dict = dict() for w in weighted_features: source = self.input_data.loc[np.concatenate(filtred_matches.values)][w].values target = self.input_data.loc[filtred_matches.index.to_list()][w].values index = abs(source - target) <= abs(source) * threshold index_dict.update({w: index}) index_filtered = sum(index_dict.values()) == len(self.weights) matched_data = pd.concat( [self.input_data.loc[filtred_matches.index.to_list()].iloc[index_filtered], self.input_data.loc[np.concatenate(filtred_matches.values)].iloc[index_filtered]] ) else: matched_data = pd.concat([self.input_data.loc[filtred_matches.index.to_list()], self.input_data.loc[np.concatenate(filtred_matches.values)]]) return matched_data def lama_feature_select(self) -> pd.DataFrame: """Calculates the importance of each feature. This method use LamaFeatureSelector to rank the importance of each feature in the dataset The features are then sorted by their importance with the most important feature first Returns: The feature importances, sorted in descending order """ self._log("Counting feature importance") feat_select = FeatureSelector( outcome=self.outcomes[0], outcome_type=self.outcome_type, treatment=self.treatment, timeout=self.timeout, n_threads=self.n_threads, n_folds=self.n_folds, verbose=self.verbose, generate_report=self.generate_report, report_dir=self.report_feat_select_dir, use_algos=self.use_algos, ) df = self.input_data if self.group_col is None else self.input_data.drop(columns=self.group_col) if self.info_col is not None: df = df.drop(columns=self.info_col) features = feat_select.perform_selection(df=df) if self.group_col is None: self.features_importance = features else: self.features_importance = features.append( {"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True ) return self.features_importance.sort_values("Importance", ascending=False) def _create_faiss_matcher(self, df=None, validation=None): """Creates a FaissMatcher object. Args: df: The dataframe to use. If None, uses self.input_data. validation: Whether to use the matcher for validation. 
If None, determines based on whether """ if df is None: df = self.input_data self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, weights=self.weights, features=self.features_importance, group_col=self.group_col, validation=validation, n_neighbors=self.n_neighbors, pbar=False if validation else self.pbar, ) def _perform_validation(self): """Performs validation using the FaissMatcher.""" if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If True, log at debug level; otherwise log at info level """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be dropped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature": self.input_data, self.validate = random_feature(self.input_data) if self.features_importance is not None and i == 0: self.features_importance.append("random_feature") self.matcher = FaissMatcher( self.input_data, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) elif refuter == "subset_refuter": df, self.validate = subset_refuter(self.input_data, self.treatment, fraction) self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) else: logger.error("Incorrect refuter name") raise NameError( "Incorrect refuter name! 
Available refuters: `random_feature`, `random_treatment`, `subset_refuter`" ) if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) for outcome in self.outcomes: self.pval_dict.update({outcome: [np.mean(self.val_dict[outcome])]}) self.pval_dict[outcome].append(
test_significance(
10
2023-11-01 08:58:57+00:00
16k
tianhaowuhz/human-assisting-dex-grasp
Algorithms/ppo/gf_ppo_update.py
[ { "identifier": "RolloutStorage", "path": "Algorithms/ppo/storage.py", "snippet": "class RolloutStorage:\n\n def __init__(self, num_envs, num_transitions_per_env, obs_shape, states_shape, actions_shape, device='cpu', sampler='sequential'):\n\n self.device = device\n self.sampler = sampl...
from datetime import datetime from gym.spaces import Space from collections import deque from torch.utils.data import Dataset, TensorDataset, DataLoader from torch.utils.tensorboard import SummaryWriter from Algorithms.ppo import RolloutStorage from Algorithms.ppo import ActorCritic from Algorithms.SDE_update import loss_fn_cond, cond_ode_sampler, init_sde from Networks.SDENets_update import CondScoreModel from tqdm import tqdm from ipdb import set_trace import os import time import functools import numpy as np import statistics import glob import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import copy import time import pickle import cv2 import matplotlib.pyplot as plt import io import _pickle as CPickle
13,390
def images_to_video(path, images, fps=10, size=(256,256), suffix='mp4'): path = path+f'.{suffix}' out = cv2.VideoWriter(filename=path, fourcc=cv2.VideoWriter_fourcc(*'mp4v'), fps=fps, frameSize=size, isColor=True) for item in images: out.write(item.astype(np.uint8)) out.release() def get_img_from_fig(fig, dpi=180): buf = io.BytesIO() fig.savefig(buf, format="png", dpi=dpi) buf.seek(0) img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8) buf.close() img = cv2.imdecode(img_arr, 1) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return img class GFPPO: def __init__(self, vec_env, cfg_train, device='cpu', sampler='sequential', log_dir='run', is_testing=False, print_log=True, apply_reset=False, asymmetric=False, args=None, ): self.args = args ''' PPO ''' # PPO parameters if not isinstance(vec_env.observation_space, Space): raise TypeError("vec_env.observation_space must be a gym Space") if not isinstance(vec_env.state_space, Space): raise TypeError("vec_env.state_space must be a gym Space") if not isinstance(vec_env.action_space, Space): raise TypeError("vec_env.action_space must be a gym Space") self.observation_space = vec_env.observation_space self.action_space = vec_env.action_space self.state_space = vec_env.state_space self.cfg_train = copy.deepcopy(cfg_train) learn_cfg = self.cfg_train["learn"] self.device = device self.asymmetric = asymmetric self.desired_kl = learn_cfg.get("desired_kl", None) self.schedule = learn_cfg.get("schedule", "fixed") self.step_size = learn_cfg["optim_stepsize"] self.init_noise_std = learn_cfg.get("init_noise_std", 0.3) self.model_cfg = self.cfg_train["policy"] self.num_transitions_per_env=learn_cfg["nsteps"] self.learning_rate=learn_cfg["optim_stepsize"] self.clip_param = learn_cfg["cliprange"] self.num_learning_epochs = learn_cfg["noptepochs"] self.num_mini_batches = learn_cfg["nminibatches"] self.value_loss_coef = learn_cfg.get("value_loss_coef", 2.0) self.entropy_coef = learn_cfg["ent_coef"] self.gamma = learn_cfg["gamma"] self.lam = learn_cfg["lam"] self.max_grad_norm = learn_cfg.get("max_grad_norm", 2.0) self.use_clipped_value_loss = learn_cfg.get("use_clipped_value_loss", False) # policy type self.action_type = self.cfg_train["setting"]["action_type"] self.sub_action_type = self.cfg_train["setting"]["sub_action_type"] self.action_clip = self.cfg_train["setting"]["action_clip"] self.grad_process = self.cfg_train["setting"]["grad_process"] self.grad_scale = self.cfg_train["setting"]["grad_scale"] if self.action_type=='joint' and self.sub_action_type=='add+jointscale': action_space_shape = (18+18,) else: action_space_shape = self.action_space.shape print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') self.vec_env = vec_env self.vec_env.grad_scale = self.grad_scale pointnet_version = self.cfg_train["policy"]["pointnet_version"] hand_pcl = self.cfg_train["policy"]["hand_pcl"] hand_model = None # PPO components self.stack_frame_numer = self.vec_env.stack_frame_numbers self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape, self.init_noise_std, self.model_cfg, asymmetric=asymmetric, stack_frame_number=self.stack_frame_numer, sub_obs_type=self.vec_env.sub_obs_type, num_fingertip=self.vec_env.num_fingertips, pointnet_type=pointnet_version, envs=self.vec_env, hand_pcl=hand_pcl, hand_model=hand_model, args=args) # pointnet backbone self.pointnet_finetune = self.model_cfg['finetune_pointnet'] self.finetune_pointnet_bz = 128 if self.model_cfg['pretrain_pointnet']: if pointnet_version == 'pt2': 
pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device) elif pointnet_version == 'pt': pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), map_location=self.device) if self.model_cfg['shared_pointnet']: self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict) if not self.model_cfg['finetune_pointnet']: # freeze pointnet for name,param in self.actor_critic.pointnet_enc.named_parameters(): param.requires_grad = False else: self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict) self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict) if not self.model_cfg['finetune_pointnet']: # freeze pointnet for name,param in self.actor_critic.actor_pointnet_enc.named_parameters(): param.requires_grad = False for name,param in self.actor_critic.critic_pointnet_enc.named_parameters(): param.requires_grad = False self.actor_critic.to(self.device)
# gf part save_video = False img_size = 256 save_state = False def images_to_video(path, images, fps=10, size=(256,256), suffix='mp4'): path = path+f'.{suffix}' out = cv2.VideoWriter(filename=path, fourcc=cv2.VideoWriter_fourcc(*'mp4v'), fps=fps, frameSize=size, isColor=True) for item in images: out.write(item.astype(np.uint8)) out.release() def get_img_from_fig(fig, dpi=180): buf = io.BytesIO() fig.savefig(buf, format="png", dpi=dpi) buf.seek(0) img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8) buf.close() img = cv2.imdecode(img_arr, 1) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return img class GFPPO: def __init__(self, vec_env, cfg_train, device='cpu', sampler='sequential', log_dir='run', is_testing=False, print_log=True, apply_reset=False, asymmetric=False, args=None, ): self.args = args ''' PPO ''' # PPO parameters if not isinstance(vec_env.observation_space, Space): raise TypeError("vec_env.observation_space must be a gym Space") if not isinstance(vec_env.state_space, Space): raise TypeError("vec_env.state_space must be a gym Space") if not isinstance(vec_env.action_space, Space): raise TypeError("vec_env.action_space must be a gym Space") self.observation_space = vec_env.observation_space self.action_space = vec_env.action_space self.state_space = vec_env.state_space self.cfg_train = copy.deepcopy(cfg_train) learn_cfg = self.cfg_train["learn"] self.device = device self.asymmetric = asymmetric self.desired_kl = learn_cfg.get("desired_kl", None) self.schedule = learn_cfg.get("schedule", "fixed") self.step_size = learn_cfg["optim_stepsize"] self.init_noise_std = learn_cfg.get("init_noise_std", 0.3) self.model_cfg = self.cfg_train["policy"] self.num_transitions_per_env=learn_cfg["nsteps"] self.learning_rate=learn_cfg["optim_stepsize"] self.clip_param = learn_cfg["cliprange"] self.num_learning_epochs = learn_cfg["noptepochs"] self.num_mini_batches = learn_cfg["nminibatches"] self.value_loss_coef = learn_cfg.get("value_loss_coef", 2.0) self.entropy_coef = learn_cfg["ent_coef"] self.gamma = learn_cfg["gamma"] self.lam = learn_cfg["lam"] self.max_grad_norm = learn_cfg.get("max_grad_norm", 2.0) self.use_clipped_value_loss = learn_cfg.get("use_clipped_value_loss", False) # policy type self.action_type = self.cfg_train["setting"]["action_type"] self.sub_action_type = self.cfg_train["setting"]["sub_action_type"] self.action_clip = self.cfg_train["setting"]["action_clip"] self.grad_process = self.cfg_train["setting"]["grad_process"] self.grad_scale = self.cfg_train["setting"]["grad_scale"] if self.action_type=='joint' and self.sub_action_type=='add+jointscale': action_space_shape = (18+18,) else: action_space_shape = self.action_space.shape print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') self.vec_env = vec_env self.vec_env.grad_scale = self.grad_scale pointnet_version = self.cfg_train["policy"]["pointnet_version"] hand_pcl = self.cfg_train["policy"]["hand_pcl"] hand_model = None # PPO components self.stack_frame_numer = self.vec_env.stack_frame_numbers self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape, self.init_noise_std, self.model_cfg, asymmetric=asymmetric, stack_frame_number=self.stack_frame_numer, sub_obs_type=self.vec_env.sub_obs_type, num_fingertip=self.vec_env.num_fingertips, pointnet_type=pointnet_version, envs=self.vec_env, hand_pcl=hand_pcl, hand_model=hand_model, args=args) # pointnet backbone self.pointnet_finetune = self.model_cfg['finetune_pointnet'] self.finetune_pointnet_bz = 128 if 
self.model_cfg['pretrain_pointnet']: if pointnet_version == 'pt2': pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device) elif pointnet_version == 'pt': pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), map_location=self.device) if self.model_cfg['shared_pointnet']: self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict) if not self.model_cfg['finetune_pointnet']: # freeze pointnet for name,param in self.actor_critic.pointnet_enc.named_parameters(): param.requires_grad = False else: self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict) self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict) if not self.model_cfg['finetune_pointnet']: # freeze pointnet for name,param in self.actor_critic.actor_pointnet_enc.named_parameters(): param.requires_grad = False for name,param in self.actor_critic.critic_pointnet_enc.named_parameters(): param.requires_grad = False self.actor_critic.to(self.device)
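The images_to_video helper defined in this file wraps cv2.VideoWriter. A hedged usage sketch, assuming OpenCV and NumPy are installed and the frames match the declared size:

import cv2
import numpy as np

# Synthetic grayscale-ramp frames standing in for rendered environment images.
frames = [np.full((256, 256, 3), v, dtype=np.uint8) for v in range(0, 250, 25)]

# Same call pattern as images_to_video: mp4v codec, fixed fps and frame size.
writer = cv2.VideoWriter('demo.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 10, (256, 256), True)
for frame in frames:
    writer.write(frame)  # frames must already be uint8 arrays of the declared size
writer.release()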
self.storage = RolloutStorage(self.vec_env.num_envs, self.num_transitions_per_env, self.observation_space.shape,
0
2023-11-09 06:08:40+00:00
16k
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in th...
import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Sequence from rhofold.model.primitives import Linear, LayerNorm from rhofold.utils.rigid_utils import Rigid from rhofold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) from einops import rearrange from rhofold.utils.alphabet import RNAAlphabet from rhofold.utils.converter import RNAConverter
11,003
no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): z[0] = z[0].cpu() # [*, H, N_res, N_res] a = torch.matmul(
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps self.fn = torch.nn.LayerNorm(1) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) phase = self.fn(norm) return phase * normed_coors # classes class EGNN(torch.nn.Module): def __init__( self, dim, m_dim=32, ): super().__init__() ''' # Most of the code in this file is based on egnn-pytorch by lucidrains. 
''' edge_input_dim = (dim * 2) + 1 self.edge_mlp = torch.nn.Sequential( torch.nn.Linear(edge_input_dim, edge_input_dim * 2), SiLU(), torch.nn.Linear(edge_input_dim * 2, m_dim), SiLU() ) self.coors_norm = CoorsNorm() self.node_mlp = torch.nn.Sequential( torch.nn.Linear(dim + m_dim, dim * 2), SiLU(), torch.nn.Linear(dim * 2, dim), ) self.coors_mlp = torch.nn.Sequential( torch.nn.Linear(m_dim, m_dim * 4), SiLU(), torch.nn.Linear(m_dim * 4, 1) ) def forward(self, feats, coors): rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d') rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True) feats_j = rearrange(feats, 'b j d -> b () j d') feats_i = rearrange(feats, 'b i d -> b i () d') feats_i, feats_j = torch.broadcast_tensors(feats_i, feats_j) edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1) m_ij = self.edge_mlp(edge_input) coor_weights = self.coors_mlp(m_ij) coor_weights = rearrange(coor_weights, 'b i j () -> b i j') rel_coors = self.coors_norm(rel_coors) scale_factor = 1 / 50.0 coors_out = torch.einsum('b i j, b i j c -> b i c', coor_weights * scale_factor, rel_coors) + coors m_i = m_ij.sum(dim=-2) node_mlp_input = torch.cat((feats, m_i), dim=-1) node_out = self.node_mlp(node_mlp_input) + feats return node_out, coors_out class ResEGNN(torch.nn.Module): def __init__(self, corrections=4, dims_in=41, **kwargs): super().__init__() self.layers = torch.nn.ModuleList([EGNN(dim=dims_in, **kwargs) for _ in range(corrections)]) def forward(self, amino, geom, is_fea = False, keep_last_cords = None): output = [] for layer in self.layers: geom_init = geom amino, geom = layer(amino, geom) if keep_last_cords is not None: geom[:, -keep_last_cords:] = geom_init[:, -keep_last_cords:] output.append([amino, geom]) return output if is_fea else geom class PosEmbedding(nn.Embedding): """ """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: torch.Tensor): """Input is expected to be of size [bsz x seqlen].""" mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden) self.linear_2 = Linear(self.c_hidden, self.c_hidden) self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() 
for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = 
self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): z[0] = z[0].cpu() # [*, H, N_res, N_res] a = torch.matmul(
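The point handling above packs H*(P_q+P_v) 3-D points into one linear_kv_points output, splits the flat vector into x/y/z thirds, stacks them into a trailing coordinate axis, and finally separates key points from value points. A toy-dimension sketch of that exact reshape chain (the dimensions are assumptions chosen for illustration):

import torch

B, N, H, Pq, Pv = 2, 5, 4, 3, 2                # batch, residues, heads, qk points, v points
kv_pts = torch.randn(B, N, H * (Pq + Pv) * 3)  # flat linear_kv_points output

# Split into three equal chunks (x, y, z), then stack them as a trailing coord axis.
kv_pts = torch.stack(torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1), dim=-1)
print(kv_pts.shape)  # torch.Size([2, 5, 20, 3]) == [B, N, H*(Pq+Pv), 3]

# Unflatten the heads, then split key points from value points along the point axis.
kv_pts = kv_pts.view(B, N, H, -1, 3)
k_pts, v_pts = torch.split(kv_pts, [Pq, Pv], dim=-2)
print(k_pts.shape, v_pts.shape)  # [2, 5, 4, 3, 3] and [2, 5, 4, 2, 3]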
permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden]
4
2023-11-01 10:29:08+00:00
16k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
13,730
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem()
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem()
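The mixer setup above loops background music at reduced volume. For reference, a small sketch of the surrounding pygame.mixer.music controls; the file path is a placeholder and assumes an mp3 is present:

import pygame

pygame.mixer.init()
pygame.mixer.music.load('bg_music.mp3')  # placeholder path
pygame.mixer.music.set_volume(0.2)       # 0.0-1.0 scale, so 0.2 == 20%
pygame.mixer.music.play(-1)              # -1 loops forever, matching the game setup

# Later, the music can be paused/resumed or faded out without reloading.
pygame.mixer.music.pause()
pygame.mixer.music.unpause()
pygame.mixer.music.fadeout(2000)         # fade-out duration in milliseconds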
markus_system = MarkusSystem()
21
2023-11-06 09:52:13+00:00
16k
Codra-Ingenierie-Informatique/DataLab
cdl/core/computation/signal.py
[ { "identifier": "fit", "path": "cdl/algorithms/fit.py", "snippet": "class FitModel(abc.ABC):\nclass GaussianModel(FitModel):\nclass LorentzianModel(FitModel):\nclass VoigtModel(FitModel):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(\n cls, amplitude, sigma\n ): ...
import guidata.dataset as gds import numpy as np import scipy.integrate as spt import scipy.ndimage as spi import scipy.optimize as spo import scipy.signal as sps from cdl.algorithms import fit from cdl.algorithms.signal import ( derivative, interpolate, moving_average, normalize, peak_indexes, xpeak, xy_fft, xy_ifft, ) from cdl.config import _ from cdl.core.computation.base import ( ClipParam, FFTParam, GaussianParam, MovingAverageParam, MovingMedianParam, ThresholdParam, ) from cdl.core.model.signal import SignalObj
10,905
class XYCalibrateParam(gds.DataSet): """Signal calibration parameters""" axes = (("x", _("X-axis")), ("y", _("Y-axis"))) axis = gds.ChoiceItem(_("Calibrate"), axes, default="y") a = gds.FloatItem("a", default=1.0) b = gds.FloatItem("b", default=0.0) def compute_calibration(src: SignalObj, p: XYCalibrateParam) -> SignalObj: """Compute linear calibration Args: src (SignalObj): source signal p (XYCalibrateParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "calibration", f"{p.axis}={p.a}*{p.axis}+{p.b}") x, y = src.get_data() if p.axis == "x": dst.set_xydata(p.a * x + p.b, y) else: dst.set_xydata(x, p.a * y + p.b) return dst def compute_threshold(src: SignalObj, p: ThresholdParam) -> SignalObj: """Compute threshold clipping Args: src (SignalObj): source signal p (ThresholdParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "threshold", f"min={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, p.value, y.max())) return dst def compute_clip(src: SignalObj, p: ClipParam) -> SignalObj: """Compute maximum data clipping Args: src (SignalObj): source signal p (ClipParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "clip", f"max={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, y.min(), p.value)) return dst def compute_gaussian_filter(src: SignalObj, p: GaussianParam) -> SignalObj: """Compute gaussian filter Args: src (SignalObj): source signal p (GaussianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "gaussian_filter", f"σ={p.sigma:.3f}") x, y = src.get_data() dst.set_xydata(x, spi.gaussian_filter1d(y, p.sigma)) return dst def compute_moving_average(src: SignalObj, p: MovingAverageParam) -> SignalObj: """Compute moving average Args: src (SignalObj): source signal p (MovingAverageParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_average", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, moving_average(y, p.n)) return dst def compute_moving_median(src: SignalObj, p: MovingMedianParam) -> SignalObj: """Compute moving median Args: src (SignalObj): source signal p (MovingMedianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_median", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, sps.medfilt(y, kernel_size=p.n)) return dst def compute_wiener(src: SignalObj) -> SignalObj: """Compute Wiener filter Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "wiener") x, y = src.get_data() dst.set_xydata(x, sps.wiener(y)) return dst def compute_fft(src: SignalObj, p: FFTParam) -> SignalObj: """Compute FFT Args: src (SignalObj): source signal p (FFTParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "fft") x, y = src.get_data()
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ .. Signal computation objects (see parent package :mod:`cdl.core.computation`) """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... # Note: # ---- # All dataset classes must also be imported in the cdl.core.computation.param module. from __future__ import annotations VALID_DTYPES_STRLIST = SignalObj.get_valid_dtypenames() def dst_11(src: SignalObj, name: str, suffix: str | None = None) -> SignalObj: """Create result signal object for compute_11 function Args: src (SignalObj): source signal name (str): name of the function Returns: SignalObj: result signal object """ dst = src.copy(title=f"{name}({src.short_id})") if suffix is not None: dst.title += "|" + suffix return dst def dst_n1n(src1: SignalObj, src2: SignalObj, name: str, suffix: str | None = None): """Create result signal object for compute_n1n function Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 name (str): name of the function Returns: SignalObj: result signal object """ dst = src1.copy(title=f"{name}({src1.short_id}, {src2.short_id})") if suffix is not None: dst.title += "|" + suffix return dst # -------- compute_n1 functions -------------------------------------------------------- # Functions with N input signals and 1 output signal # -------------------------------------------------------------------------------------- # Those functions perform a computation on N input signals and return a single # output signal. If we were only executing these functions locally, we would not need # to define them here, but since we are using the multiprocessing module, we need to # define them here so that they can be pickled and sent to the worker processes. # Also, we need to systematically return the output signal object, even if it is already # modified in place, because the multiprocessing module will not be able to retrieve # the modified object from the worker processes.
def compute_add(dst: SignalObj, src: SignalObj) -> SignalObj: """Add signal to result signal Args: dst (SignalObj): destination signal src (SignalObj): source signal """ dst.y += np.array(src.y, dtype=dst.y.dtype) if dst.dy is not None: dst.dy = np.sqrt(dst.dy**2 + src.dy**2) return dst def compute_product(dst: SignalObj, src: SignalObj) -> SignalObj: """Multiply signal to result signal Args: dst (SignalObj): destination signal src (SignalObj): source signal """ dst.y *= np.array(src.y, dtype=dst.y.dtype) if dst.dy is not None: dst.dy = dst.y * np.sqrt((dst.dy / dst.y) ** 2 + (src.dy / src.y) ** 2) return dst # -------- compute_n1n functions ------------------------------------------------------- # Functions with N input images + 1 input image and N output images # -------------------------------------------------------------------------------------- def compute_difference(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute difference between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "difference") dst.y = src1.y - src2.y if dst.dy is not None: dst.dy = np.sqrt(src1.dy**2 + src2.dy**2) return dst def compute_quadratic_difference(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute quadratic difference between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "quadratic_difference") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, (y1 - np.array(y2, dtype=y1.dtype)) / np.sqrt(2.0)) if np.issubdtype(dst.data.dtype, np.unsignedinteger): dst.data[src1.data < src2.data] = 0 if dst.dy is not None: dst.dy = np.sqrt(src1.dy**2 + src2.dy**2) return dst def compute_division(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute division between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "division") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, y1 / np.array(y2, dtype=y1.dtype)) return dst # -------- compute_11 functions -------------------------------------------------------- # Functions with 1 input image and 1 output image # -------------------------------------------------------------------------------------- def extract_multiple_roi(src: SignalObj, group: gds.DataSetGroup) -> SignalObj: """Extract multiple regions of interest from data Args: src (SignalObj): source signal group (gds.DataSetGroup): group of parameters Returns: SignalObj: signal with multiple regions of interest """ suffix = None if len(group.datasets) == 1: p = group.datasets[0] suffix = f"indexes={p.col1:d}:{p.col2:d}" dst = dst_11(src, "extract_multiple_roi", suffix) x, y = src.get_data() xout, yout = np.ones_like(x) * np.nan, np.ones_like(y) * np.nan for p in group.datasets: slice0 = slice(p.col1, p.col2 + 1) xout[slice0], yout[slice0] = x[slice0], y[slice0] nans = np.isnan(xout) | np.isnan(yout) dst.set_xydata(xout[~nans], yout[~nans]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def extract_single_roi(src: SignalObj, p: gds.DataSet) -> SignalObj: """Extract single region of interest from data Args: src (SignalObj): source signal p (gds.DataSet): parameters Returns: SignalObj: signal with single region of interest """ dst = dst_11(src, "extract_single_roi", 
f"indexes={p.col1:d}:{p.col2:d}") x, y = src.get_data() dst.set_xydata(x[p.col1 : p.col2 + 1], y[p.col1 : p.col2 + 1]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def compute_swap_axes(src: SignalObj) -> SignalObj: """Swap axes Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "swap_axes") x, y = src.get_data() dst.set_xydata(y, x) return dst def compute_abs(src: SignalObj) -> SignalObj: """Compute absolute value Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "abs") x, y = src.get_data() dst.set_xydata(x, np.abs(y)) return dst def compute_re(src: SignalObj) -> SignalObj: """Compute real part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "re") x, y = src.get_data() dst.set_xydata(x, np.real(y)) return dst def compute_im(src: SignalObj) -> SignalObj: """Compute imaginary part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "im") x, y = src.get_data() dst.set_xydata(x, np.imag(y)) return dst class DataTypeSParam(gds.DataSet): """Convert signal data type parameters""" dtype_str = gds.ChoiceItem( _("Destination data type"), list(zip(VALID_DTYPES_STRLIST, VALID_DTYPES_STRLIST)), help=_("Output image data type."), ) def compute_astype(src: SignalObj, p: DataTypeSParam) -> SignalObj: """Convert data type Args: src: source signal p: parameters Returns: Result signal object """ dst = dst_11(src, "astype", f"dtype={p.dtype_str}") dst.xydata = src.xydata.astype(p.dtype_str) return dst def compute_log10(src: SignalObj) -> SignalObj: """Compute Log10 Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "log10") x, y = src.get_data() dst.set_xydata(x, np.log10(y)) return dst class PeakDetectionParam(gds.DataSet): """Peak detection parameters""" threshold = gds.IntItem( _("Threshold"), default=30, min=0, max=100, slider=True, unit="%" ) min_dist = gds.IntItem(_("Minimum distance"), default=1, min=1, unit="points") def compute_peak_detection(src: SignalObj, p: PeakDetectionParam) -> SignalObj: """Peak detection Args: src (SignalObj): source signal p (PeakDetectionParam): parameters Returns: SignalObj: result signal object """ dst = dst_11( src, "peak_detection", f"threshold={p.threshold}%, min_dist={p.min_dist}pts" ) x, y = src.get_data() indexes = peak_indexes(y, thres=p.threshold * 0.01, min_dist=p.min_dist) dst.set_xydata(x[indexes], y[indexes]) dst.metadata["curvestyle"] = "Sticks" return dst class NormalizeYParam(gds.DataSet): """Normalize parameters""" methods = ( (_("maximum"), "maximum"), (_("amplitude"), "amplitude"), (_("sum"), "sum"), (_("energy"), "energy"), ) method = gds.ChoiceItem(_("Normalize with respect to"), methods) def compute_normalize(src: SignalObj, p: NormalizeYParam) -> SignalObj: """Normalize data Args: src (SignalObj): source signal p (NormalizeYParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "normalize", f"ref={p.method}") x, y = src.get_data() dst.set_xydata(x, normalize(y, p.method)) return dst def compute_derivative(src: SignalObj) -> SignalObj: """Compute derivative Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "derivative") x, y = src.get_data() dst.set_xydata(x, derivative(x, y)) return dst def compute_integral(src: SignalObj) -> SignalObj: """Compute integral Args: src 
(SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "integral") x, y = src.get_data() dst.set_xydata(x, spt.cumtrapz(y, x, initial=0.0)) return dst class XYCalibrateParam(gds.DataSet): """Signal calibration parameters""" axes = (("x", _("X-axis")), ("y", _("Y-axis"))) axis = gds.ChoiceItem(_("Calibrate"), axes, default="y") a = gds.FloatItem("a", default=1.0) b = gds.FloatItem("b", default=0.0) def compute_calibration(src: SignalObj, p: XYCalibrateParam) -> SignalObj: """Compute linear calibration Args: src (SignalObj): source signal p (XYCalibrateParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "calibration", f"{p.axis}={p.a}*{p.axis}+{p.b}") x, y = src.get_data() if p.axis == "x": dst.set_xydata(p.a * x + p.b, y) else: dst.set_xydata(x, p.a * y + p.b) return dst def compute_threshold(src: SignalObj, p: ThresholdParam) -> SignalObj: """Compute threshold clipping Args: src (SignalObj): source signal p (ThresholdParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "threshold", f"min={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, p.value, y.max())) return dst def compute_clip(src: SignalObj, p: ClipParam) -> SignalObj: """Compute maximum data clipping Args: src (SignalObj): source signal p (ClipParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "clip", f"max={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, y.min(), p.value)) return dst def compute_gaussian_filter(src: SignalObj, p: GaussianParam) -> SignalObj: """Compute gaussian filter Args: src (SignalObj): source signal p (GaussianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "gaussian_filter", f"σ={p.sigma:.3f}") x, y = src.get_data() dst.set_xydata(x, spi.gaussian_filter1d(y, p.sigma)) return dst def compute_moving_average(src: SignalObj, p: MovingAverageParam) -> SignalObj: """Compute moving average Args: src (SignalObj): source signal p (MovingAverageParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_average", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, moving_average(y, p.n)) return dst def compute_moving_median(src: SignalObj, p: MovingMedianParam) -> SignalObj: """Compute moving median Args: src (SignalObj): source signal p (MovingMedianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_median", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, sps.medfilt(y, kernel_size=p.n)) return dst def compute_wiener(src: SignalObj) -> SignalObj: """Compute Wiener filter Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "wiener") x, y = src.get_data() dst.set_xydata(x, sps.wiener(y)) return dst def compute_fft(src: SignalObj, p: FFTParam) -> SignalObj: """Compute FFT Args: src (SignalObj): source signal p (FFTParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "fft") x, y = src.get_data()
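compute_difference and compute_add above propagate per-point uncertainties with the usual uncorrelated-error rule, dy = sqrt(dy1**2 + dy2**2). A standalone NumPy sketch of that rule; the arrays are made up for illustration:

import numpy as np

y1, dy1 = np.array([1.0, 2.0, 3.0]), np.array([0.1, 0.1, 0.2])
y2, dy2 = np.array([0.5, 1.5, 2.5]), np.array([0.2, 0.1, 0.1])

diff = y1 - y2
# Quadrature sum: valid when the two error sources are independent.
d_diff = np.sqrt(dy1**2 + dy2**2)
print(diff)    # [0.5 0.5 0.5]
print(d_diff)  # [0.2236... 0.1414... 0.2236...]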
dst.set_xydata(*xy_fft(x, y, shift=p.shift))
7
2023-11-09 16:56:03+00:00
16k
lalalamdbf/PLSE_IDRR
src/prompt-tuning/prompt/pipeline_base.py
[ { "identifier": "InputExample", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be pas...
from pickle import FALSE from torch.utils.data.sampler import RandomSampler from transformers.configuration_utils import PretrainedConfig from transformers.generation_utils import GenerationMixin from torch.utils.data import Dataset from typing import * from .data_utils import InputExample, InputFeatures from torch.utils.data._utils.collate import default_collate from tqdm.std import tqdm from transformers.tokenization_utils import PreTrainedTokenizer from transformers.utils.dummy_pt_objects import PreTrainedModel from .utils import TokenizerWrapper from .prompt_base import Template, Verbalizer from collections import defaultdict from collections import namedtuple from torch.utils.data import DataLoader import torch import torch.nn as nn import inspect import numpy as np
10,960
def signature(f): r"""Get the function f's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`): the function to get the input arguments. Returns: namedtuple: of args, defaults, varargs, keywords, respectively. """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is first wrapped with the prompt's template, and then tokenized by a wrapped tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch size of the data loader. teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true when training a generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggested to set to true in generation. truncate_method (:obj:`str`, optional): the truncate method to use. Select from `head`, `tail`, `balanced`. kwargs: Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None,
def signature(f): r"""Get the function f's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`): the function to get the input arguments. Returns: namedtuple: of args, defaults, varargs, keywords, respectively. """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is first wrapped with the prompt's template, and then tokenized by a wrapped tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch size of the data loader. teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true when training a generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggested to set to true in generation. truncate_method (:obj:`str`, optional): the truncate method to use. Select from `head`, `tail`, `balanced`. kwargs: Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None,
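The signature() helper above returns a namedtuple of positional args, their defaults, and the *args/**kwargs names. Exercised on a toy function below; this is a standalone re-derivation for illustration, not an import from the repository:

import inspect
from collections import namedtuple

def demo(a, b=2, *rest, **options):
    pass

params = inspect.signature(demo).parameters.values()
args = [p.name for p in params if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
defaults = [p.default for p in params
            if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty] or None
varargs = next((p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL), None)
keywords = next((p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD), None)

Signature = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords'])
print(Signature(args, defaults, varargs, keywords))
# Signature(args=['a', 'b'], defaults=[2], varargs='rest', keywords='options')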
verbalizer: Optional[Verbalizer] = None,
4
2023-11-01 08:52:36+00:00
16k
choderalab/chiron
Examples/LJ_langevin.py
[ { "identifier": "LJPotential", "path": "chiron/potential.py", "snippet": "class LJPotential(NeuralNetworkPotential):\n def __init__(\n self,\n topology: Topology,\n sigma: unit.Quantity = 3.350 * unit.angstroms,\n epsilon: unit.Quantity = 1.0 * unit.kilocalories_per_mole,\...
from openmmtools.testsystems import LennardJonesFluid from chiron.potential import LJPotential from openmm import unit from chiron.states import SamplerState, ThermodynamicState from chiron.neighbors import NeighborListNsqrd, OrthogonalPeriodicSpace from chiron.reporters import SimulationReporter from chiron.integrators import LangevinIntegrator import os import h5py import matplotlib.pyplot as plt
11,204
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state
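With the sigma, epsilon, and cutoff values above, the 12-6 Lennard-Jones pair energy can be checked numerically; the minimum of -epsilon sits at r = 2**(1/6)*sigma. A unit-free NumPy sketch follows (chiron's LJPotential handles openmm units internally; plain floats are used here purely for illustration):

import numpy as np

sigma, epsilon = 0.34, 0.238        # nm, kcal/mol (values from the script above)
cutoff = 3.0 * sigma

def lj_energy(r):
    """4*eps*((sigma/r)**12 - (sigma/r)**6), truncated to zero beyond the cutoff."""
    sr6 = (sigma / r) ** 6
    return np.where(r < cutoff, 4.0 * epsilon * (sr6**2 - sr6), 0.0)

r_min = 2 ** (1 / 6) * sigma
print(r_min)                         # ~0.3816 nm
print(lj_energy(np.array([r_min])))  # ~[-0.238], i.e. -epsilon at the minimum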
sampler_state = SamplerState(
1
2023-11-07 18:17:43+00:00
16k
WolfgangFahl/dcm
dcm/dcm_webserver.py
[ { "identifier": "Assessment", "path": "dcm/dcm_assessment.py", "snippet": "class Assessment:\n \"\"\"\n Assessment for CompetenceTree\n \"\"\"\n\n def __init__(\n self,\n webserver: NiceGuiWebserver,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n debug:...
import os from typing import Optional from urllib.parse import urlparse from fastapi import HTTPException from fastapi.responses import HTMLResponse from ngwidgets.file_selector import FileSelector from ngwidgets.input_webserver import InputWebserver from ngwidgets.webserver import WebserverConfig from nicegui import Client, app, ui from pydantic import BaseModel from dcm.dcm_assessment import Assessment from dcm.dcm_chart import DcmChart from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap, Learner from dcm.svg import SVG, SVGConfig from dcm.version import Version
12,069
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str config: Optional[SVGConfig] = None class DynamicCompentenceMapWebServer(InputWebserver): """ server to supply Dynamic Competence Map Visualizations """ @classmethod def get_config(cls) -> WebserverConfig: """ get the configuration for this Webserver """ copy_right = "(c)2023-2024 Wolfgang Fahl" config = WebserverConfig(
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str config: Optional[SVGConfig] = None class DynamicCompentenceMapWebServer(InputWebserver): """ server to supply Dynamic Competence Map Visualizations """ @classmethod def get_config(cls) -> WebserverConfig: """ get the configuration for this Webserver """ copy_right = "(c)2023-2024 Wolfgang Fahl" config = WebserverConfig(
copy_right=copy_right, version=Version(), default_port=8885
7
2023-11-06 09:24:24+00:00
16k
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
10,911
Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
data["points"] = uncrop_points(data["points"], crop_box)
17
2023-11-03 17:05:40+00:00
16k
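The snippet in the record above deduplicates masks across overlapping crops with class-agnostic batched NMS, scoring each mask by the inverse area of the crop it came from so that masks from smaller crops win ties. A minimal, self-contained sketch of that step, assuming torchvision's batched_nms and box_area are available; the tensors here are illustrative, not the record's own data:

import torch
from torchvision.ops import batched_nms, box_area

# Two overlapping detections of the same object: one from the full image,
# one from a smaller crop. Boxes are in XYXY format.
boxes = torch.tensor([[10., 10., 50., 50.],
                      [11., 11., 51., 51.]])
# The crop box each mask came from (full image vs. a small crop).
crop_boxes = torch.tensor([[0., 0., 100., 100.],
                           [0., 0., 60., 60.]])

# Prefer masks from smaller crops: score = 1 / crop area.
scores = 1.0 / box_area(crop_boxes)
# Class-agnostic NMS: a single dummy category for every box.
keep = batched_nms(boxes, scores, torch.zeros(len(boxes), dtype=torch.long),
                   iou_threshold=0.7)
print(keep)  # tensor([1]): the box from the smaller crop survives

Scoring by inverse crop area is the same trick as in the record's _generate_masks: when the same object is detected in both a crop and the full image, the higher-resolution (smaller-crop) mask is kept.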
microsoft/PLEX
PLEX/finetuning.py
[ { "identifier": "TrajectoryDataset", "path": "PLEX/util/data.py", "snippet": "class TrajectoryDataset:\n def __init__(self, trajectories, camera_names, contextual):\n self.trajectories = list(trajectories)\n if not globals.full_state_mode:\n self.camera_names = camera_names\n...
import os import torch import argparse import sys from PLEX.util.data import TrajectoryDataset, load_data, setup_batch_sampler, train_val_split from PLEX.util.misc import parse_tasks, setup_essentials, setup_model, set_trainable_params, setup_trainer, run_training from PLEX.util.evaluators import get_success_rate_evaluator, get_validation_error_evaluator from PLEX.util.cmdline import add_common_args, add_conditioning_args from PLEX.util.log import setup_wandb_logging
11,755
def finetune(cmdline_args): os.environ["NCCL_DEBUG"] = "INFO" print("=== Finetuning ===") parser = argparse.ArgumentParser() # Add all relevant command-line arguments add_common_args(parser) add_conditioning_args(parser) parser.add_argument('--finetune_learning_rate', type=float, default=1e-5) parser.add_argument('--finetune_steps_per_iter', type=int, default=100) parser.add_argument('--target_task', type=str, default=None) parser.add_argument('--max_target_trajectories', type=int, default=None) # Parse them and validate them args = parser.parse_args(cmdline_args) args = vars(args) if not args['bc_learning_mode']: assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out." # NOTE: The arguments below aren't actual command-line arguments. We are just adding them to args[] out of convenience. # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the # finetuning trajectories contain actions**, we adapt PLEX's inverse dynamics based on the predicted observation latents # from its planner PL rather than based on the actual ("grounded") observation latents contained # in finetuning trajectories. if args['model'] == 'PLEX': args['grounded_inverse_dynamics_loss_weight'] = 0 args['predicted_inverse_dynamics_loss_weight'] = 1 args['future_prediction_loss_weight'] = 1 log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args) # NOTE: common_env_metadata_dict may be modified by the calls to load_data below. # Load data: target-task trajectories target_tasks, target_max_trajs = parse_tasks(args['target_task'], args['robot'], args['max_target_trajectories']) target_task = target_tasks[0] data = load_data(log, data_dir, target_tasks, max_trajectories=target_max_trajs, discount=args['discount'], camera_names=camera_names, image_size=args['image_size'], target_frame_rate=args['target_frame_rate'], normalize_rewards=args['normalize_reward'], reward_type=args['reward_type'], common_env_metadata_dict=common_env_metadata_dict, data_shuffling_rng=data_shuffling_rng) assert len(data.keys()) == 1, f"There should be only one target task. Discovered {len(data.keys())}: {data.keys()}" #assert args['validation_tasks'] is None, f"Validation tasks other than the target tasks aren't used during finetuning and were likely specified erroneously: {args['validation_tasks']}." # Train/test split # NOTE: we don't actually need to create the split if args['best_metric'] == 'evaluation/success_rate' if args['best_metric'] == 'evaluation/success_rate': print("WARNING: since the evaluation metric is success rate, the training-validation split of the target task data will be ignored, and all target-task trajectories will be used for training.") train_trajectories, val_trajectories = train_val_split(data[target_task.name], args['validation_frac'])
def finetune(cmdline_args): os.environ["NCCL_DEBUG"] = "INFO" print("=== Finetuning ===") parser = argparse.ArgumentParser() # Add all relevant command-line arguments add_common_args(parser) add_conditioning_args(parser) parser.add_argument('--finetune_learning_rate', type=float, default=1e-5) parser.add_argument('--finetune_steps_per_iter', type=int, default=100) parser.add_argument('--target_task', type=str, default=None) parser.add_argument('--max_target_trajectories', type=int, default=None) # Parse them and validate them args = parser.parse_args(cmdline_args) args = vars(args) if not args['bc_learning_mode']: assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out." # NOTE: The arguments below aren't actual command-line arguments. We are just adding them to args[] out of convenience. # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the # finetuning trajectories contain actions**, we adapt PLEX's inverse dynamics based on the predicted observation latents # from its planner PL rather than based on the actual ("grounded") observation latents contained # in finetuning trajectories. if args['model'] == 'PLEX': args['grounded_inverse_dynamics_loss_weight'] = 0 args['predicted_inverse_dynamics_loss_weight'] = 1 args['future_prediction_loss_weight'] = 1 log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args) # NOTE: common_env_metadata_dict may be modified by the calls to load_data below. # Load data: target-task trajectories target_tasks, target_max_trajs = parse_tasks(args['target_task'], args['robot'], args['max_target_trajectories']) target_task = target_tasks[0] data = load_data(log, data_dir, target_tasks, max_trajectories=target_max_trajs, discount=args['discount'], camera_names=camera_names, image_size=args['image_size'], target_frame_rate=args['target_frame_rate'], normalize_rewards=args['normalize_reward'], reward_type=args['reward_type'], common_env_metadata_dict=common_env_metadata_dict, data_shuffling_rng=data_shuffling_rng) assert len(data.keys()) == 1, f"There should be only one target task. Discovered {len(data.keys())}: {data.keys()}" #assert args['validation_tasks'] is None, f"Validation tasks other than the target tasks aren't used during finetuning and were likely specified erroneously: {args['validation_tasks']}." # Train/test split # NOTE: we don't actually need to create the split if args['best_metric'] == 'evaluation/success_rate' if args['best_metric'] == 'evaluation/success_rate': print("WARNING: since the evaluation metric is success rate, the training-validation split of the target task data will be ignored, and all target-task trajectories will be used for training.") train_trajectories, val_trajectories = train_val_split(data[target_task.name], args['validation_frac'])
target_all_data = TrajectoryDataset(data[target_task.name], camera_names, contextual=True)
0
2023-11-06 09:38:09+00:00
16k
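The finetuning record above relies on train_val_split to carve the target-task trajectories into train and validation sets by args['validation_frac']. A hedged sketch of what such a helper typically does, assuming a simple seeded shuffle split; the repo's own train_val_split may differ in details such as rng handling or split order:

import random
from typing import List, Sequence, Tuple

def train_val_split(trajectories: Sequence, val_frac: float,
                    seed: int = 0) -> Tuple[List, List]:
    """Shuffle the trajectories and hand val_frac of them to validation."""
    items = list(trajectories)
    random.Random(seed).shuffle(items)
    n_val = int(len(items) * val_frac)
    return items[n_val:], items[:n_val]

train, val = train_val_split(range(10), 0.2)
print(len(train), len(val))  # 8 2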
Giftify-Bot/Giftify-Bot
bot.py
[ { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[disc...
import asyncio import datetime import logging import os import pathlib import sys import traceback import aiohttp import asyncpg import discord import dotenv import jishaku import sentry_sdk import uvloop from logging.handlers import RotatingFileHandler from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from amari import AmariClient from discord.ext import commands from discord.utils import MISSING from discord.utils import _ColourFormatter as ColourFormatter from expiringdict import ExpiringDict from sentry_sdk.integrations.logging import LoggingIntegration from models.giveaway_settings import GuildConfig from models.giveaways import Giveaway from models.raffles import Raffle from utils.constants import ERROR_EMOJI, SUCCESS_EMOJI, WARN_EMOJI from utils.db import db_init from utils.tree import CommandTree from utils.view import ConfirmationView from cogs.timer_manager import TimerManager from models.donation_settings import GuildDonationConfig
13,742
try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {} raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300) pool: asyncpg.Pool user: discord.ClientUser amari_client: AmariClient """A helper class for Giftify's operations. This class provides methods to send interaction messages with embeds, fetch webhooks for a channel, and retrieve or fetch guild configuration. """ async def send( self, interaction: discord.Interaction, message: str, reason: str = "success", ephemeral: bool = True, view: discord.ui.View = MISSING, ) -> None: """Sends an interaction message with embed. Parameters ----------- interaction: discord.Interaction The interaction to respond to. message: str The response message to send. reason: str The reason to send the message, can be "warn", "error" or "success". ephemeral: bool If the response should be sent ephemerally. """
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {} raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300) pool: asyncpg.Pool user: discord.ClientUser amari_client: AmariClient """A helper class for Giftify's operations. This class provides methods to send interaction messages with embeds, fetch webhooks for a channel, and retrieve or fetch guild configuration. """ async def send( self, interaction: discord.Interaction, message: str, reason: str = "success", ephemeral: bool = True, view: discord.ui.View = MISSING, ) -> None: """Sends an interaction message with embed. Parameters ----------- interaction: discord.Interaction The interaction to respond to. message: str The response message to send. reason: str The reason to send the message, can be "warn", "error" or "success". ephemeral: bool If the response should be sent ephemerally. """
emoji = WARN_EMOJI if reason == "warn" else ERROR_EMOJI if reason == "error" else SUCCESS_EMOJI
4
2023-11-09 15:00:15+00:00
16k
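The bot record above attaches a RemoveNoise filter to the discord.state logger so that its "referencing an unknown ..." warnings never reach the handlers. A self-contained sketch of that stdlib logging pattern; the filter class mirrors the record, while the basicConfig demo wiring is illustrative:

import logging

class RemoveNoise(logging.Filter):
    def __init__(self) -> None:
        super().__init__(name="discord.state")

    def filter(self, record: logging.LogRecord) -> bool:
        # Drop only the noisy WARNING; let every other record through.
        if record.levelname == "WARNING" and "referencing an unknown" in record.msg:
            return False
        return True

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("discord.state")
log.addFilter(RemoveNoise())
log.warning("referencing an unknown guild")  # filtered out
log.info("resumed session")                  # still printed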
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test...
import os import csv import shutil import torch import torch.nn as nn import pickle from thop import profile from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.jsf import create_jsf_datasets from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params from tensorboardX import SummaryWriter
11,813
##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0]) params(train_loader, model, model_disc) ##### Continuing from previous training session ##### start_epoch = BASELINE_EPOCH if args.continue_weights is not None: if args.continue_epoch is None: print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif args.continue_epoch is not None: print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") return ##### Lr Scheduler vs static lr ##### if args.lr is None: if args.continue_epoch is None: init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START * len(args.gpu) lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if args.ce_smoothing is None: train_loss_func = eval_loss_func else: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD) ##### Optimizer ##### opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) opt_disc = Adam(model_disc.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) if args.lr is None: lr_scheduler = LambdaLR(opt, lr_stepper.step) lr_disc_scheduler = LambdaLR(opt_disc, lr_stepper.step) else: lr_scheduler = None lr_disc_scheduler = None ##### Tracking best evaluation accuracy ##### best_eval_acc = 0.0 best_eval_acc_epoch = -1 best_eval_loss = float("inf") best_eval_loss_epoch = -1 ##### Results reporting ##### if not os.path.isfile(results_file): with open(results_file, "w", newline="") as o_stream: writer = csv.writer(o_stream) writer.writerow(CSV_HEADER) ##### TRAIN LOOP ##### for epoch in range(start_epoch, args.epochs): # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense) if epoch > BASELINE_EPOCH: print(SEPERATOR) print("NEW EPOCH:", epoch + 1) print(SEPERATOR) print("") # Train if args.only_Transformer: train_with_pure_transformer(epoch + 1, model, train_loader, train_loss_func, opt, lr_scheduler, args.print_modulus) else: if args.adv_train: train_with_adv(epoch + 1, model, model_disc, train_loader, train_loss_func, opt, opt_disc, lr_scheduler, lr_disc_scheduler, args.print_modulus) else: train_epoch(epoch + 1, model, train_loader, train_loss_func, opt, lr_scheduler, args.print_modulus) print(SEPERATOR) print("Evaluating:") else: print(SEPERATOR) print("Baseline model evaluation (Epoch 0):") if epoch != -1: # Eval train_loss, train_acc = eval_model(model, train_loader, train_loss_func) eval_loss, eval_acc = eval_model(model, test_loader, eval_loss_func) print("Epoch:", epoch + 1) if args.metrics: TER = get_metrics(model, test_loader) print("TER:", TER) # Learn rate
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0]) params(train_loader, model, model_disc) ##### Continuing from previous training session ##### start_epoch = BASELINE_EPOCH if args.continue_weights is not None: if args.continue_epoch is None: print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif args.continue_epoch is not None: print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") return ##### Lr Scheduler vs static lr ##### if args.lr is None: if args.continue_epoch is None: init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START * len(args.gpu) lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if args.ce_smoothing is None: train_loss_func = eval_loss_func else: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD) ##### Optimizer ##### opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) opt_disc = Adam(model_disc.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) if args.lr is None: lr_scheduler = LambdaLR(opt, lr_stepper.step) lr_disc_scheduler = LambdaLR(opt_disc, lr_stepper.step) else: lr_scheduler = None lr_disc_scheduler = None ##### Tracking best evaluation accuracy ##### best_eval_acc = 0.0 best_eval_acc_epoch = -1 best_eval_loss = float("inf") best_eval_loss_epoch = -1 ##### Results reporting ##### if not os.path.isfile(results_file): with open(results_file, "w", newline="") as o_stream: writer = csv.writer(o_stream) writer.writerow(CSV_HEADER) ##### TRAIN LOOP ##### for epoch in range(start_epoch, args.epochs): # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense) if epoch > BASELINE_EPOCH: print(SEPERATOR) print("NEW EPOCH:", epoch + 1) print(SEPERATOR) print("") # Train if args.only_Transformer: train_with_pure_transformer(epoch + 1, model, train_loader, train_loss_func, opt, lr_scheduler, args.print_modulus) else: if args.adv_train: train_with_adv(epoch + 1, model, model_disc, train_loader, train_loss_func, opt, opt_disc, lr_scheduler, lr_disc_scheduler, args.print_modulus) else: train_epoch(epoch + 1, model, train_loader, train_loss_func, opt, lr_scheduler, args.print_modulus) print(SEPERATOR) print("Evaluating:") else: print(SEPERATOR) print("Baseline model evaluation (Epoch 0):") if epoch != -1: # Eval train_loss, train_acc = eval_model(model, train_loader, train_loss_func) eval_loss, eval_acc = eval_model(model, test_loader, eval_loss_func) print("Epoch:", epoch + 1) if args.metrics: TER = get_metrics(model, test_loader) print("TER:", TER) # Learn rate
lr = get_lr(opt)
8
2023-11-01 08:33:08+00:00
16k
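When no static lr is supplied, the training script above drives torch's LambdaLR with an LrStepTracker seeded by d_model and SCHEDULER_WARMUP_STEPS. A minimal sketch of the inverse-square-root warmup such a tracker conventionally implements, assuming the standard Transformer schedule; CoCoFormer's exact constants and step accounting may differ:

import torch
from torch.optim.lr_scheduler import LambdaLR

d_model, warmup = 512, 4000

def step_to_scale(step: int) -> float:
    step = max(step, 1)  # avoid 0 ** -0.5 on the scheduler's first call
    return (d_model ** -0.5) * min(step ** -0.5, step * warmup ** -1.5)

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.Adam([param], lr=1.0)  # LambdaLR multiplies this base lr
sched = LambdaLR(opt, step_to_scale)
for _ in range(5):
    opt.step()
    sched.step()
print(opt.param_groups[0]["lr"])  # tiny during warmup; peaks at step == warmup

The learning rate rises linearly for the first warmup steps and then decays as step ** -0.5, which is why the script above also threads init_step through when resuming from a checkpointed epoch.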
tiendatnguyen-vision/Orbit-symmetrize
ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImp...
import math import logging import itertools import torch from functools import lru_cache as cache, reduce from collections import defaultdict from plum import dispatch from torch import nn from ..groups import Group from .linear_operator_base import LinearOperator from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \ LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product from .utils import orthogonal_complement, krylov_constraint_solve, get_device
11,304
def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps} device = self.G.device if self.G is not None else get_device(list(Qs.values())) Qs = {rep: (Q.to(device).to(torch.float) if torch.is_tensor(Q) else Q) \ for rep, Q in Qs.items()} active_dims = sum(self.reps[rep]*Qs[rep].size(-1) for rep in Qs.keys()) multiplicities = self.reps.values() def lazy_Q(array):
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix.""" return densify(self.rho(M)) def drho_dense(self, A): """ A convenience function which returns drho(A) as a dense matrix.""" return densify(self.drho(A)) def constraint_matrix(self): """ Constructs the equivariance constrant matrix (lazily) by concatenating the constraints (ρ(hᵢ)-I) for i=1,...M and dρ(Aₖ) for k=1,..,D from the generators of the symmetry group. """ n = self.size() constraints = [] constraints.extend([lazify(self.rho(h)).to(self.G.device)-I(n, device=self.G.device) \ for h in self.G.discrete_generators]) constraints.extend([lazify(self.drho(A)).to(self.G.device) for A in self.G.lie_algebra]) return ConcatLazy(constraints) if constraints else lazify( torch.zeros((1, n), device=self.G.device)) solcache = {} def equivariant_basis(self): """ Computes the equivariant solution basis for the given representation of size N. Canonicalizes problems and caches solutions for reuse. 
Output [Q (N,r)] """ if self == Scalar: return torch.ones((1, 1), device=self.G.device) canon_rep, perm = self.canonicalize() invperm = torch.argsort(perm) if canon_rep not in self.solcache: logging.info("%r cache miss", canon_rep) logging.info("Solving basis for %r%s", self, f", for G={self.G}" if self.G is not None else "") C_lazy = canon_rep.constraint_matrix() if C_lazy.size(0)*C_lazy.size(1) > 3e7: # Too large to use SVD result = krylov_constraint_solve(C_lazy) else: C_dense = C_lazy.to_dense() result = orthogonal_complement(C_dense) self.solcache[canon_rep] = result return self.solcache[canon_rep][invperm] def equivariant_projector(self): """ Computes the (lazy) projection matrix P=QQᵀ that projects to the equivariant basis.""" Q = self.equivariant_basis() Q_lazy = lazify(Q) P = Q_lazy@Q_lazy.H() return P def concrete(self): """ Concreteness """ return isinstance(self.G, Group) def __add__(self, other): """ Direct sum (⊕) of representations. """ if isinstance(other, int): if other == 0: return self return self+other*Scalar if both_concrete(self, other): return SumRep(self, other) return DeferredSumRep(self, other) def __radd__(self, other): if isinstance(other, int): if other == 0: return self return other*Scalar+self return NotImplemented def __mul__(self, other): """ Tensor sum (⊗) of representations. """ return mul_reps(self, other) def __rmul__(self, other): return mul_reps(other, self) def __pow__(self, other): """ Iterated tensor product. """ assert isinstance(other, int), \ f"Power only supported for integers, not {type(other)}" assert other >= 0, f"Negative powers {other} not supported" return reduce(lambda a, b: a*b, other*[self], Scalar) def __rshift__(self, other): """ Linear maps from self -> other """ return other*self.t() def __lshift__(self, other): """ Linear maps from other -> self """ return self*other.t() def __lt__(self, other): """ less than defined to disambiguate ordering multiple different representations. Canonical ordering is determined first by Group, then by size, then by hash""" if other == Scalar: return False try: if self.G < other.G: return True if self.G > other.G: return False except (AttributeError, TypeError): pass if self.size() < other.size(): return True if self.size() > other.size(): return False return hash(self) < hash(other) # For sorting purposes only def t(self): """ Dual representation V*, rho*, drho*.""" if isinstance(self.G, Group) and self.G.is_orthogonal: return self return Dual(self) @dispatch def mul_reps(ra, rb: int): """ Product of a scalar and a representation. """ if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. 
""" def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M) rhoinvt = rho.invt() if isinstance(rho, LinearOperator) else torch.linalg.inv(rho).t() return rhoinvt def drho(self, A): return -self.rep.drho(A).t() def __repr__(self): return repr(self.rep)+"*" def t(self): return self.rep def __eq__(self, other): return type(other) is type(self) and self.rep == other.rep def __hash__(self): return hash((type(self), self.rep)) def __lt__(self, other): if other == self.rep: return False return super().__lt__(other) def size(self): return self.rep.size() # Alias V or Vector for an instance of the Base representation of a group V = Vector = Base() # An instance of the Scalar representation, equivalent to V**0 Scalar = ScalarRep() def T(p, q=0, G=None): """ A convenience function for creating rank (p,q) tensors.""" return (V**p*V.t()**q)(G) def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? 
investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. 
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps} device = self.G.device if self.G is not None else get_device(list(Qs.values())) Qs = {rep: (Q.to(device).to(torch.float) if torch.is_tensor(Q) else Q) \ for rep, Q in Qs.items()} active_dims = sum(self.reps[rep]*Qs[rep].size(-1) for rep in Qs.keys()) multiplicities = self.reps.values() def lazy_Q(array):
return lazy_direct_matmat(array, Qs.values(), multiplicities)[self.invperm]
11
2023-11-01 07:19:02+00:00
16k
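The equivariant_basis method in the last record boils down to a null-space computation: stack the constraints (rho(h)-I) for the discrete generators and drho(A) for the Lie algebra, then take the orthogonal complement of their row space. A small worked example for the permutation group acting on R^3, whose only invariant direction is the all-ones vector; this SVD route mirrors what an orthogonal_complement helper does and is an illustration, not the repo's solver:

import torch

n = 3
# Discrete generators of S_3 as permutation matrices.
perm_cycle = torch.eye(n)[[1, 2, 0]]  # the (0 1 2) cycle
perm_swap = torch.eye(n)[[1, 0, 2]]   # the (0 1) transposition

# Constraint matrix: rows of (rho(h) - I) for each generator.
C = torch.cat([perm_cycle - torch.eye(n), perm_swap - torch.eye(n)])

# Null space = right singular vectors whose singular value is ~zero.
U, S, Vh = torch.linalg.svd(C)
rank = int((S > 1e-6).sum())
Q = Vh[rank:].t()  # (n, r): basis of invariant vectors
print(Q.squeeze())  # proportional (up to sign) to the all-ones vector

Here C has rank 2, so the basis Q is one-dimensional, matching the fact that the only vectors fixed by every permutation of coordinates are the constant ones.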