| id | prompt | docstring |
|---|---|---|
161,892 | import logging
import multiprocessing
import os
import time
import pickle
import _thread as thread
from watchdog.observers import Observer
from robot import config, constants, statistic, Player
from robot.ConfigMonitor import ConfigMonitor
from robot.sdk import LED
def singleton(cls):
_instance = {}
def inner(conversation):
if cls not in _instance:
_instance[cls] = cls(conversation)
return _instance[cls]
return inner | null |
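A minimal usage sketch of the `singleton` decorator above; `Bot` and the string arguments are hypothetical stand-ins for the real conversation-bound class:

def singleton(cls):
    _instance = {}
    def inner(conversation):
        if cls not in _instance:
            _instance[cls] = cls(conversation)
        return _instance[cls]
    return inner

@singleton
class Bot:  # hypothetical stand-in for the decorated class
    def __init__(self, conversation):
        self.conversation = conversation

a = Bot("conversation-A")
b = Bot("conversation-B")
assert a is b                              # second call returns the cached instance
assert a.conversation == "conversation-A"  # only the first constructor call runs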
161,893 | import os
import base64
import tempfile
import pypinyin
import subprocess
import uuid
import asyncio
import edge_tts
import nest_asyncio
from aip import AipSpeech
from . import utils, config, constants
from robot import logging
from pathlib import Path
from pypinyin import lazy_pinyin
from pydub import AudioSegment
from abc import ABCMeta, abstractmethod
from .sdk import TencentSpeech, AliSpeech, XunfeiSpeech, atc, VITSClient
import requests
from xml.etree import ElementTree
logger = logging.getLogger(__name__)
def get_engines():
def get_subclasses(cls):
subclasses = set()
for subclass in cls.__subclasses__():
subclasses.add(subclass)
subclasses.update(get_subclasses(subclass))
return subclasses
return [
engine
for engine in list(get_subclasses(AbstractTTS))
if hasattr(engine, "SLUG") and engine.SLUG
]
The provided code snippet includes necessary dependencies for implementing the `get_engine_by_slug` function. Write a Python function `def get_engine_by_slug(slug=None)` to solve the following problem:
Returns: A TTS Engine implementation available on the current platform Raises: ValueError if no speaker implementation is supported on this platform
Here is the function:
def get_engine_by_slug(slug=None):
"""
Returns:
A TTS Engine implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or not isinstance(slug, str):
raise TypeError("无效的 TTS slug '%s'" % slug)
selected_engines = list(
filter(
lambda engine: hasattr(engine, "SLUG") and engine.SLUG == slug,
get_engines(),
)
)
if len(selected_engines) == 0:
raise ValueError(f"错误:找不到名为 {slug} 的 TTS 引擎")
else:
if len(selected_engines) > 1:
logger.warning(f"注意: 有多个 TTS 名称与指定的引擎名 {slug} 匹配")
engine = selected_engines[0]
logger.info(f"使用 {engine.SLUG} TTS 引擎")
return engine.get_instance() | Returns: A TTS Engine implementation available on the current platform Raises: ValueError if no speaker implementation is supported on this platform |
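A self-contained miniature of the SLUG-based discovery that `get_engines`/`get_engine_by_slug` perform above; `DemoTTS` is a hypothetical engine, not part of the real robot package:

class AbstractTTS:  # stand-in for the robot package's abstract base class
    SLUG = None

class DemoTTS(AbstractTTS):  # hypothetical engine
    SLUG = "demo-tts"
    @classmethod
    def get_instance(cls):
        return cls()

def get_subclasses(cls):
    subclasses = set()
    for subclass in cls.__subclasses__():
        subclasses.add(subclass)
        subclasses.update(get_subclasses(subclass))
    return subclasses

engines = [e for e in get_subclasses(AbstractTTS) if getattr(e, "SLUG", None)]
selected = [e for e in engines if e.SLUG == "demo-tts"]
assert selected and isinstance(selected[0].get_instance(), DemoTTS)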
161,894 | import os
import requests
import json
import semver
from subprocess import call
from robot import constants, logging
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
_updater = None
class Updater(object):
def __init__(self):
self.last_check = datetime.now() - timedelta(days=1.5)
self.update_info = {}
def _pull(self, cwd, tag):
if os.path.exists(cwd):
return (
call(
[f"git checkout master && git pull && git checkout {tag}"],
cwd=cwd,
shell=True,
)
== 0
)
else:
logger.error(f"目录 {cwd} 不存在")
return False
def _pip(self, cwd):
if os.path.exists(cwd):
return (
call(
["pip3", "install", "-r", "requirements.txt"], cwd=cwd, shell=False
)
== 0
)
else:
logger.error(f"目录 {cwd} 不存在")
return False
def update(self):
update_info = self.fetch()
success = True
if update_info == {}:
logger.info("恭喜你,wukong-robot 已经是最新!")
if "main" in update_info:
if self._pull(
constants.APP_PATH, update_info["main"]["version"]
) and self._pip(constants.APP_PATH):
logger.info("wukong-robot 更新成功!")
self.update_info.pop("main")
else:
logger.info("wukong-robot 更新失败!")
success = False
if "contrib" in update_info:
if self._pull(
constants.CONTRIB_PATH, update_info["contrib"]["version"]
) and self._pip(constants.CONTRIB_PATH):
logger.info("wukong-contrib 更新成功!")
self.update_info.pop("contrib")
else:
logger.info("wukong-contrib 更新失败!")
success = False
return success
def _get_version(self, path, current):
if os.path.exists(os.path.join(path, "VERSION")):
with open(os.path.join(path, "VERSION"), "r") as f:
return f.read().strip()
else:
return current
def fetch(self):
global URL, DEV_URL
url = URL
now = datetime.now()
if (now - self.last_check).total_seconds() <= 1800:
logger.debug(f"30 分钟内已检查过更新,使用上次的检查结果:{self.update_info}")
return self.update_info
try:
self.last_check = now
r = requests.get(url, timeout=3)
info = json.loads(r.text)
main_version = info["main"]["version"]
contrib_version = info["contrib"]["version"]
# 检查主仓库
current_main_version = self._get_version(constants.APP_PATH, main_version)
current_contrib_version = self._get_version(
constants.CONTRIB_PATH, contrib_version
)
if semver.compare(main_version, current_main_version) > 0:
logger.info(f"主仓库检查到更新:{info['main']}")
self.update_info["main"] = info["main"]
if semver.compare(contrib_version, current_contrib_version) > 0:
logger.info(f"插件库检查到更新:{info['contrib']}")
self.update_info["contrib"] = info["contrib"]
if "notices" in info:
self.update_info["notices"] = info["notices"]
return self.update_info
except Exception as e:
logger.error(f"检查更新失败:{e}", stack_info=True)
return {}
def fetch():
global _updater
if not _updater:
_updater = Updater()
return _updater.fetch() | null |
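A minimal sketch of the version check `fetch()` relies on; `semver.compare` returns -1/0/1, and the version strings here are made up:

import semver

remote, local = "3.1.0", "3.0.2"       # placeholder versions
if semver.compare(remote, local) > 0:  # same check as fetch() above
    print(f"update available: {local} -> {remote}")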
161,895 | import time
from snowboy import snowboydecoder
from robot import config, logging, utils, constants
logger = logging.getLogger(__name__)
detector = None
recorder = None
porcupine = None
The provided code snippet includes necessary dependencies for implementing the `initDetector` function. Write a Python function `def initDetector(wukong)` to solve the following problem:
初始化离线唤醒热词监听器,支持 snowboy 和 porcupine 两大引擎
Here is the function:
def initDetector(wukong):
"""
初始化离线唤醒热词监听器,支持 snowboy 和 porcupine 两大引擎
"""
global porcupine, recorder, detector
if config.get("detector", "snowboy") == "porcupine":
logger.info("使用 porcupine 进行离线唤醒")
import pvporcupine
from pvrecorder import PvRecorder
access_key = config.get("/porcupine/access_key")
keyword_paths = config.get("/porcupine/keyword_paths")
keywords = config.get("/porcupine/keywords", ["porcupine"])
if keyword_paths:
porcupine = pvporcupine.create(
access_key=access_key,
keyword_paths=[constants.getConfigData(kw) for kw in keyword_paths],
sensitivities=[config.get("sensitivity", 0.5)] * len(keyword_paths),
)
else:
porcupine = pvporcupine.create(
access_key=access_key,
keywords=keywords,
sensitivities=[config.get("sensitivity", 0.5)] * len(keywords),
)
recorder = PvRecorder(device_index=-1, frame_length=porcupine.frame_length)
recorder.start()
try:
while True:
pcm = recorder.read()
result = porcupine.process(pcm)
if result >= 0:
kw = keyword_paths[result] if keyword_paths else keywords[result]
logger.info(
"[porcupine] Keyword {} Detected at time {}".format(
kw,
time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime(time.time())
),
)
)
wukong._detected_callback(False)
recorder.stop()
wukong.conversation.interrupt()
query = wukong.conversation.activeListen()
wukong.conversation.doResponse(query)
recorder.start()
except pvporcupine.PorcupineActivationError as e:
logger.error("[Porcupine] AccessKey activation error", stack_info=True)
raise e
except pvporcupine.PorcupineActivationLimitError as e:
logger.error(
f"[Porcupine] AccessKey {access_key} has reached it's temporary device limit",
stack_info=True,
)
raise e
except pvporcupine.PorcupineActivationRefusedError as e:
logger.error(
"[Porcupine] AccessKey '%s' refused" % access_key, stack_info=True
)
raise e
except pvporcupine.PorcupineActivationThrottledError as e:
logger.error(
"[Porcupine] AccessKey '%s' has been throttled" % access_key,
stack_info=True,
)
raise e
except pvporcupine.PorcupineError as e:
logger.error("[Porcupine] 初始化 Porcupine 失败", stack_info=True)
raise e
except KeyboardInterrupt:
logger.info("Stopping ...")
finally:
porcupine and porcupine.delete()
recorder and recorder.delete()
else:
logger.info("使用 snowboy 进行离线唤醒")
detector and detector.terminate()
models = constants.getHotwordModel(config.get("hotword", "wukong.pmdl"))
detector = snowboydecoder.HotwordDetector(
models, sensitivity=config.get("sensitivity", 0.5)
)
# main loop
try:
callbacks = wukong._detected_callback
detector.start(
detected_callback=callbacks,
audio_recorder_callback=wukong.conversation.converse,
interrupt_check=wukong._interrupt_callback,
silent_count_threshold=config.get("silent_threshold", 15),
recording_timeout=config.get("recording_timeout", 5) * 4,
sleep_time=0.03,
)
detector.terminate()
except Exception as e:
logger.critical(f"离线唤醒机制初始化失败:{e}", stack_info=True) | 初始化离线唤醒热词监听器,支持 snowboy 和 porcupine 两大引擎 |
161,896 | import os
import shutil
DATA_PATH = os.path.join(APP_PATH, "static")
CONFIG_PATH = os.path.expanduser(os.getenv("WUKONG_CONFIG", "~/.wukong"))
The provided code snippet includes necessary dependencies for implementing the `getQAPath` function. Write a Python function `def getQAPath()` to solve the following problem:
获取QA数据集文件的路径 returns: QA数据集文件的存储路径
Here is the function:
def getQAPath():
"""
获取QA数据集文件的路径
returns: QA数据集文件的存储路径
"""
qa_source = os.path.join(DATA_PATH, "qa.csv")
qa_dst = os.path.join(CONFIG_PATH, "qa.csv")
if not os.path.exists(qa_dst):
shutil.copyfile(qa_source, qa_dst)
return qa_dst | 获取QA数据集文件的路径 returns: QA数据集文件的存储路径 |
161,897 | import pkgutil
from . import config
from . import constants
from robot import logging
from robot.sdk.AbstractPlugin import AbstractPlugin
logger = logging.getLogger(__name__)
_has_init = False
_plugins_query = []
def init_plugins(con):
"""
动态加载技能插件
参数:
con -- 会话模块
"""
global _has_init
locations = [constants.PLUGIN_PATH, constants.CONTRIB_PATH, constants.CUSTOM_PATH]
logger.debug(f"检查插件目录:{locations}")
global _plugins_query
nameSet = set()
for finder, name, ispkg in pkgutil.walk_packages(locations):
try:
loader = finder.find_module(name)
mod = loader.load_module(name)
except Exception:
logger.warning(f"插件 {name} 加载出错,跳过", exc_info=True)
continue
if not hasattr(mod, "Plugin"):
logger.debug(f"模块 {name} 非插件,跳过")
continue
# plugins run at query
plugin = mod.Plugin(con)
if plugin.SLUG == "AbstractPlugin":
plugin.SLUG = name
# check conflict
if plugin.SLUG in nameSet:
logger.warning(f"插件 {name} SLUG({plugin.SLUG}) 重复,跳过")
continue
nameSet.add(plugin.SLUG)
# whether a plugin is enabled
if config.has(plugin.SLUG) and "enable" in config.get(plugin.SLUG):
if not config.get(plugin.SLUG)["enable"]:
logger.info(f"插件 {name} 已被禁用")
continue
if issubclass(mod.Plugin, AbstractPlugin):
logger.info(f"插件 {name} 加载成功 ")
_plugins_query.append(plugin)
def sort_priority(m):
if hasattr(m, "PRIORITY"):
return m.PRIORITY
return 0
_plugins_query.sort(key=sort_priority, reverse=True)
_has_init = True
def get_plugins(con):
global _plugins_query
_plugins_query = []
init_plugins(con)
return _plugins_query | null |
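A minimal plugin module sketch matching what `init_plugins` looks for: a module-level `Plugin` class derived from `AbstractPlugin`. The method names follow wukong-robot's plugin convention; treat them as assumptions if your `AbstractPlugin` version differs:

from robot.sdk.AbstractPlugin import AbstractPlugin

class Plugin(AbstractPlugin):
    SLUG = "echo"    # unique slug; duplicates are skipped by the loader
    PRIORITY = 10    # plugins are sorted by PRIORITY, highest first

    def handle(self, text, parsed):
        self.say(text)  # speak the query back

    def isValid(self, text, parsed):
        return "echo" in text.lower()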
161,898 | import os
import yaml
import json
import time
import base64
import random
import hashlib
import asyncio
import requests
import markdown
import threading
import subprocess
import tornado.web
import tornado.ioloop
import tornado.options
import tornado.httpserver
from tornado.websocket import WebSocketHandler
from urllib.parse import unquote
from robot.sdk.History import History
from robot import config, utils, logging, Updater, constants
from tools import make_json, solr_tools
logger = logging.getLogger(__name__)
settings = {
"cookie_secret": config.get(
"/server/cookie_secret", "__GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__"
),
"template_path": os.path.join(constants.APP_PATH, "server/templates"),
"static_path": os.path.join(constants.APP_PATH, "server/static"),
"login_url": "/login",
"debug": False,
}
def start_server(con, wk):
global conversation, wukong
conversation = con
wukong = wk
if config.get("/server/enable", False):
port = config.get("/server/port", "5001")
try:
asyncio.set_event_loop(asyncio.new_event_loop())
application.listen(int(port))
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
logger.critical(f"服务器启动失败: {e}", stack_info=True)
def run(conversation, wukong, debug=False):
settings["debug"] = debug
t = threading.Thread(target=lambda: start_server(conversation, wukong))
t.start() | null |
161,899 | from robot import logging
import urllib.request
import threading
import urllib
import sys
import os
logger = logging.getLogger(__name__)
def add_engine(
host, enginename, port=8983, shard=1, replica=1, maxshardpernode=5, conf="myconf"
):
"""
Add engine
"""
url = "http://{}:{}/solr/admin/collections".format(host, port)
params = {}
params["action"] = "CREATE"
params["name"] = enginename
params["numShards"] = shard
params["replicationFactor"] = replica
params["maxShardsPerNode"] = maxshardpernode
params["collection.configName"] = conf
params["wt"] = "json"
try:
req = urllib.request.Request(url)
response = urllib.request.urlopen(
req, urllib.parse.urlencode(params).encode("utf-8")
)
print(response.read())
except Exception as err:
_get_error_message(err)
def delete_engine(host, enginename, port=8983):
"""
Delete engine
"""
url = "http://{}:{}/solr/admin/collections".format(host, port)
params = {}
params["action"] = "DELETE"
params["name"] = enginename
params["wt"] = "json"
try:
req = urllib.request.Request(url)
response = urllib.request.urlopen(
req, urllib.parse.urlencode(params).encode("utf-8")
)
print(response.read())
except Exception as err:
_get_error_message(err)
def upload_documents(host, enginename, port=8983, documents="", num_thread=1):
"""
Fill documents
documents can be a file path(Each row is a json format document)
"""
def thread_upload(binary_data, mutex):
"""
We didn't use a producer-consumer model because uploads have to be batched;
otherwise too many documents would be read into memory at once
"""
url = "http://{}:{}/solr/{}/update".format(host, port, enginename)
try:
req = urllib.request.Request(url)
req.headers = HEADER
response = urllib.request.urlopen(req, binary_data.encode("utf-8"))
mutex.acquire()
logger.info(response.read())
mutex.release()
except Exception as err:
mutex.acquire()
logger.error(err, stack_info=True)
mutex.release()
def upload_batch(batch_docs):
"""
Upload a batch of documents
"""
if len(batch_docs[0]) <= 0:
return
thread_task = []
mutex = threading.Lock()
for sub_batch in batch_docs:
if len(sub_batch) <= 0:
continue
data = "[{}]".format(",".join(sub_batch))
task = threading.Thread(target=thread_upload, args=(data, mutex))
task.daemon = True
thread_task.append(task)
for task in thread_task:
task.start()
for task in thread_task:
task.join()
def upload_file(upfile):
"""
Upload a document in a file
"""
oneM = 2**20
batch_bytes = 0
batch_docs = [list() for i in range(num_thread)]
idx_container = 0
with open(upfile) as f:
for line in f:
doc = line.strip()
byte_doc = len(doc)
# Subcontainer is not full, put in the corresponding child container
if batch_bytes + byte_doc <= oneM:
batch_docs[idx_container].append(doc)
batch_bytes += byte_doc
continue
# Sub-container space is not enough, parent container is not full, switch sub-container idx
if idx_container + 1 < num_thread:
idx_container += 1
batch_docs[idx_container].append(doc)
batch_bytes = byte_doc
continue
# The parent container is full, upload
upload_batch(batch_docs)
# clear cache
batch_docs = [list() for i in range(num_thread)]
idx_container = 0
batch_docs[idx_container].append(doc)
batch_bytes = byte_doc
# Upload the last remaining
upload_batch(batch_docs)
# Based on the methods provided above, batch uploads based on incoming file types
if os.path.isfile(documents):
upload_file(documents)
elif os.path.isdir(documents):
for upfile in os.listdir(documents):
upload_file(os.path.join(documents, upfile))
else:
print(_make_smart_hint(HINT_TYPE_NOR_ERR, "Wrong document file path"))
def clear_documents(host, enginename, port=8983):
"""
delete engine
"""
url = "http://{}:{}/solr/{}/update".format(host, port, enginename)
params = {}
params["stream.body"] = "<delete><query>*:*</query></delete>"
params["wt"] = "json"
params["commit"] = "true"
try:
req = urllib.request.Request(url)
response = urllib.request.urlopen(
req, urllib.parse.urlencode(params).encode("utf-8")
)
logger.debug(response.read())
except Exception as err:
logger.error(err, stack_info=True)
def help(**kwargs):
"""
usage
"""
print(
"""=====================================================================================
solr_tools provides two ways to use: Python method and command line
Commands available:
-op - specific operations are listed and explained below
add_eng -- Add a new engine
del_eng -- Delete a engine
up_doc -- Upload documents
clear_doc -- Clear documents
-host - hostname or host ip
-port - solr listening port (default 8983)
-eng_name - solr engine name
-shard - available when op's add_eng
-replica - available when op's add_eng
-nodemaxshard - available when op's add_eng, means max shard per node
-conf_name - available when op's add_eng, indicate the linking conf file name
-schema_conf - available when op's set_schema, schema config file path
-documents - available when op's up_doc, documents path
-num_thread - available when op's up_doc, to define multithread num
====================================================================================="""
)
def call_function(func, params):
"""
call op function
"""
func(**params)
The provided code snippet includes necessary dependencies for implementing the `command_line_tools` function. Write a Python function `def command_line_tools()` to solve the following problem:
command tools
Here is the function:
def command_line_tools():
"""
command tools
"""
params = {}
ops = {
"add_eng": add_engine,
"del_eng": delete_engine,
"up_doc": upload_documents,
"clear_doc": clear_documents,
}
argidx = 1
func = help
while argidx < len(sys.argv):
if sys.argv[argidx] == "-op":
op = sys.argv[argidx + 1]
if op not in ops:
print("+-+-+-+-+-+-+-+-+-+-+-+-+-+-")
print("Not support operation, sees:")
print("+-+-+-+-+-+-+-+-+-+-+-+-+-+-")
help()
exit(1)
func = ops[op]
argidx += 2
elif sys.argv[argidx] == "-host":
params["host"] = sys.argv[argidx + 1]
argidx += 2
elif sys.argv[argidx] == "-port":
params["port"] = int(sys.argv[argidx + 1])
argidx += 2
elif sys.argv[argidx] == "-eng_name":
params["enginename"] = sys.argv[argidx + 1]
argidx += 2
elif sys.argv[argidx] == "-shard":
params["shard"] = int(sys.argv[argidx + 1])
argidx += 2
elif sys.argv[argidx] == "-replica":
params["replica"] = int(sys.argv[argidx + 1])
argidx += 2
elif sys.argv[argidx] == "-nodemaxshard":
params["maxshardpernode"] = int(sys.argv[argidx + 1])
argidx += 2
elif sys.argv[argidx] == "-conf_name":
params["conf"] = sys.argv[argidx + 1]
argidx += 2
elif sys.argv[argidx] == "-schema_conf":
params["schema_config"] = sys.argv[argidx + 1]
argidx += 2
elif sys.argv[argidx] == "-documents":
params["documents"] = sys.argv[argidx + 1]
argidx += 2
elif sys.argv[argidx] == "-num_thread":
params["num_thread"] = int(sys.argv[argidx + 1])
argidx += 2
elif sys.argv[argidx] == "-help":
help()
exit(1)
else:
help()
exit(1)
# call the specific op function
call_function(func, params) | command tools |
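Equivalent invocations of the tools above; the host, engine name, and paths are placeholders, and the command-line flags are the ones documented in `help()`:

# Command-line form:
#   python solr_tools.py -op add_eng -host localhost -eng_name wukong_qa -shard 1
# Direct Python calls against the functions defined above:
add_engine("localhost", "wukong_qa", shard=1)
upload_documents("localhost", "wukong_qa", documents="./qa_docs", num_thread=2)
clear_documents("localhost", "wukong_qa")
delete_engine("localhost", "wukong_qa")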
161,900 | import sys
import json
from robot import utils
The provided code snippet includes necessary dependencies for implementing the `write_format_file` function. Write a Python function `def write_format_file(fields, format_file_str)` to solve the following problem:
write schema file for solr
Here is the function:
def write_format_file(fields, format_file_str):
"""
write schema file for solr
"""
solr_format = []
for f in fields:
if f == "id":
continue
if f == "question":
f_type = "text_multi_lang"
f_index = True
else:
f_type = "string"
f_index = False
f_str = {"indexed": f_index, "name": f, "stored": True, "type": f_type}
solr_format.append(f_str)
with open(format_file_str, "w") as ff:
ff.write(json.dumps(solr_format, indent=4) + "\n") | write schema file for solr |
161,901 | import sys
import json
from robot import utils
def run(faq_file_str, json_file_str):
"""
convert text file to json file, save schema file
"""
idx = 0
header = 0
field_cnt = 0
auto_id = False
faq_file = open(faq_file_str, "r")
for line in faq_file:
arr = line.strip().split("\t")
if header == 0:
header = 1
field_names = arr
field_cnt = len(field_names)
if "question" not in field_names or "answer" not in field_names:
print("need question and answer")
sys.exit(6)
if "id" not in field_names:
auto_id = True
# write_format_file(field_names, format_file_str)
json_file = open(json_file_str, "w")
continue
if len(arr) != field_cnt:
print(f"line {idx+2} error")
continue
idx += 1
data = dict([field_names[i], arr[i]] for i in range(field_cnt))
if auto_id:
data["id"] = str(idx)
json_file.write(json.dumps(data, ensure_ascii=False))
json_file.write("\n")
json_file.close()
faq_file.close()
def convert(faq_str, json_file_str):
faq_file_str = utils.write_temp_file(faq_str, ".csv", mode="w")
run(faq_file_str, json_file_str) | null |
161,902 | import torch
import torch.nn as nn
import math
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `_make_divisible` function. Write a Python function `def _make_divisible(v, divisor, min_value=None)` to solve the following problem:
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return:
Here is the function:
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v | This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: |
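Worked examples of the rounding rule: values are rounded to the nearest multiple of `divisor`, then bumped up one step if that rounding dropped more than 10% of the input:

def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

assert _make_divisible(32, 8) == 32  # already a multiple of 8
assert _make_divisible(37, 8) == 40  # int(37 + 4) // 8 * 8 = 40
assert _make_divisible(11, 8) == 16  # nearest is 8, but 8 < 0.9 * 11, so bump to 16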
161,903 | import torch
import torch.nn as nn
import math
import torch.nn.functional as F
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
) | null |
161,904 | import torch
import torch.nn as nn
import math
import torch.nn.functional as F
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
) | null |
161,905 | import torch
import torch.nn as nn
import math
import torch.nn.functional as F
class MBV2_CA(nn.Module):
def __init__(self, num_classes=1000, width_mult=1.):
...  # body omitted
def forward(self, x):
...  # body omitted
def _initialize_weights(self):
...  # body omitted
def mbv2_ca(**kwargs):
return MBV2_CA(**kwargs) | null |
161,906 | import datetime
import json
import os
import re
import sys
import threading
import time
import urllib
import ssl
import pytz as pytz
import requests
from multiprocessing import Process, Queue, Manager
isDebug = True
jsonPath = '../sources.json'
resourcePath = '../resources.txt'
readMePath = '../../../README.md'
def printLog(log, color=''):
global isDebug
if isDebug:
if color == 'red' or color == 'failed' or color == 'fail' or color == 'error':
print(f"\n{bcolors.FAIL} {log} {bcolors.FAIL}\n")
elif color == 'blue':
print(f"{bcolors.OKBLUE} {log} {bcolors.OKBLUE}")
elif color == 'pink':
print(f"{bcolors.HEADER} {log} {bcolors.HEADER}")
elif color == 'green' or color == 'success':
print(f"{bcolors.OKGREEN} {log} {bcolors.OKGREEN}")
elif color == 'yellow' or color == 'warn':
print(f"\n{bcolors.WARNING} {log} {bcolors.WARNING}\n")
elif color == 'lightblue':
print(f"{bcolors.OKCYAN} {log} {bcolors.OKCYAN}")
else:
print(f"{bcolors.ENDC} {log} {bcolors.ENDC}")
def checkxbsName(name):
isAbandon = False
list = ['书源合集', '整合', '交流群', '测试', 'beta', 'QQ频道', 'QQ群', '模版', '例子', '示例', 'Beta', 'BETA',
'样板', '样版', '样本', '拷贝', '模板', '教程', '制作', '调试', '合集', '汇总', '废弃', 'Test', 'test',
'TEST']
for value in list:
if re.search(value, name):
printLog('发现关键词: ' + value + ' , ' + name + ' , 丢弃', 'warn')
isAbandon = True
return isAbandon
def checkUserName(name):
isAbandon = False
list = ['jumpli', 'ThorJsbox', 'TinhoXu', 'tickmao', 'Cyril0563', 'lTbgykio', 'Dujltqzv']
for value in list:
if re.search(value, name):
printLog('发现黑名单内的作者: ' + name + ' , 丢弃', 'warn')
isAbandon = True
return isAbandon
def startWork():
new_list = readRepoFromJson()
process = []
if os.path.exists(resourcePath):
os.remove(resourcePath)
print(resourcePath + '文件存在,已执行删除')
for i in new_list:
user = i[0]
repo = i[1] # 仓库链接
srcUrl = i[2] # 资源链接
nowPath = '../repo/' + user + '/' # 路径拼接,../repo/shidahuilang
if checkUserName(user):
# 该作者仓库疑似搬运,跳过
continue
# 检查是否存在该用户仓库存放文件夹
if not os.path.exists(nowPath):
printLog(user + '仓库文件夹不存在', 'warn')
os.makedirs(nowPath)
if os.path.exists(nowPath):
printLog(user + '仓库文件夹创建成功', 'success')
else:
printLog(user + '仓库文件夹创建失败', 'fail')
break
# process.append(Process(target=parseResouece, args=[nowPath, user, repo, srcUrl.strip()]))
if isinstance(srcUrl, list):
# 判断资源链接变量是字符串还是列表
for i in srcUrl:
process.append(Process(target=parseResouece, args=[nowPath, user, repo, i.strip()]))
elif isinstance(srcUrl, str):
process.append(Process(target=parseResouece, args=[nowPath, user, repo, srcUrl.strip()]))
# 创建并启动进程
[p.start() for p in process]
# 等待子进程结束后再继续往下运行,在当前位置阻塞主进程
[p.join() for p in process]
printLog('\n更新完成 !!!', 'lightblue')
def readRepoFromJson():
with open(jsonPath, encoding='utf-8') as f:
result = json.load(f)
# 定义一个空数组
new_list = []
for i in result:
# i是个字典
# [
# {
# "user": "xiaohucode",
# "repo": "https://github.com/xiaohucode/xiangse",
# "sourceurl": "https://github.com/xiaohucode/xiangse/blob/main/README.md"
# }
# ]
new_list.append((i.get('user'), i.get('repo'), i.get('sourceurl'))) # 将获取的值存入数组中
# printLog(new_list)
return new_list
def urlChangeToRaw(srcUrl):
if re.search('github\.com', srcUrl, flags=0):
# 将github链接置换成raw链接
printLog('发现github链接: ' + srcUrl + ',将github链接置换为githubusercontent')
srcUrl = srcUrl.replace('github', 'raw.githubusercontent').replace('/blob', '')
printLog('置换后的链接: ' + srcUrl)
return srcUrl
elif re.search('jsdelivr\.net', srcUrl):
return srcUrl
def parseResouece(nowPath, user, repo, srcUrl):
# 进程锁
manager = Manager()
lock = manager.Lock()
# 检查资源链接是xbs整合还是readme.md,如果是xbs则时间下载,如果是md则解析
if re.search('\.xbs', srcUrl):
srcUrl = urlChangeToRaw(srcUrl)
printLog('资源链接是xbs文件: ' + srcUrl, 'red')
writeSourcesListLock(lock, srcUrl)
downloadResource3(nowPath, srcUrl.strip())
elif re.search('\.md', srcUrl) or re.search('\.txt', srcUrl) or re.search('\.conf', srcUrl):
srcUrl = urlChangeToRaw(srcUrl)
getResource(lock, nowPath, srcUrl.strip())
else:
# 既不是xbs也不是md,单纯一个github仓库链接
getHtmlResource(lock, nowPath, user, repo, srcUrl)
# 最后更新readme时间
updateDate()
def getResource(lock, path, srcUrl):
success = False # 是否成功
try_times = 0 # 重试次数
res = None # 返回值
# 获取srcUrl链接资源
while try_times < 5 and not success:
res = requests.get(srcUrl)
if res.status_code != 200:
time.sleep(1)
try_times = try_times + 1
else:
success = True
break
if not success:
sys.exit('error in request %s\n\treturn code: %d' % (srcUrl, res.status_code))
# 检索是否文件内是否存在xbs链接
xbsList = re.findall('.+\.xbs', res.text)
if xbsList:
for url in xbsList:
url = url.replace('https://ghproxy.com/', '') # 移除加速代理网址前缀
# tmp = url.split('/')
# outpath = path + tmp[len(tmp) - 1]
writeSourcesListLock(lock, url)
# 启用多线程下载
threading.Thread(target=downloadResource, args=(path, url)).start()
def getHtmlResource(lock, path, user, repo, srcUrl):
# github api 获取仓库根目录下文件目录
repoApiUrl = 'https://api.github.com/repos/' + user + '/' + repo + '/contents'
reponse = requests.get(repoApiUrl).json()
if reponse:
for i in reponse:
name = i.get('name')
srcUrl = i.get('download_url')
type = i.get('type')
dirpath = i.get('path')
if checkxbsName(name):
continue
if type == 'file':
if checkxbsName(name):
continue
elif re.search('\.xbs', name):
writeSourcesListLock(lock, srcUrl)
downloadResource(path, srcUrl)
elif re.search('\.md', name):
printLog('发现markdown文件: ' + name)
else:
printLog('未识别处理的文件: ' + name)
elif type == 'dir':
# github api 获取仓库子目录下文件目录
repoApiUrl2 = 'https://api.github.com/repos/' + user + '/' + repo + '/contents' + '/' + dirpath
res = requests.get(repoApiUrl2).json()
for i2 in res:
if res:
_name = i2.get('name')
_srcUrl = i2.get('download_url')
_type = i2.get('type')
_dirpath = i2.get('path')
if checkxbsName(_name):
continue
if _type == 'file':
if checkxbsName(_name):
continue
elif re.search('\.xbs', _name):
writeSourcesListLock(lock, _srcUrl)
downloadResource(path, _srcUrl)
elif re.search('\.md', _name):
printLog('发现markdown文件: ' + _name)
else:
printLog('未识别处理的文件: ' + _name)
def downloadResource(path, srcUrl):
tmpList = srcUrl.split('/')
tmp = tmpList[len(tmpList) - 1]
if re.search('%', tmp):
printLog('发现非法字符,进行urldecode', 'warn')
tmp = urllib.parse.unquote(tmp)
if not checkxbsName(tmp):
path = path + tmp
printLog('开始下载: ' + srcUrl.strip())
ssl._create_default_https_context = ssl._create_unverified_context
index = 0
while True:
try:
r = requests.get(srcUrl, stream=True)
with open(path, 'wb') as f:
for ch in r:
f.write(ch)
except Exception as e:
printLog(srcUrl + ' 下载出错,重试', 'warn')
index += 1  # count failed attempts
if index > 5:
printLog('已重试达到最大次数,停止!', 'error')
break  # give up after the max number of retries
time.sleep(1)
continue  # retry the download
break  # success, leave the retry loop
printLog('下载成功:' + path, 'success')
def downloadResource3(path, srcUrl):
tmpList = srcUrl.split('/')
tmp = tmpList[len(tmpList) - 1]
if re.search('%', tmp):
printLog('发现非法字符,进行urldecode', 'warn')
tmp = urllib.parse.unquote(tmp)
path = path + tmp
printLog('开始下载: ' + srcUrl.strip())
ssl._create_default_https_context = ssl._create_unverified_context
index = 0
while True:
try:
r = requests.get(srcUrl, stream=True)
with open(path, 'wb') as f:
for ch in r:
f.write(ch)
except Exception as e:
printLog(srcUrl + ' 下载出错,重试', 'warn')
index += 1  # count failed attempts
if index > 5:
printLog('已重试达到最大次数,停止!', 'error')
break  # give up after the max number of retries
time.sleep(1)
continue  # retry the download
break  # success, leave the retry loop
printLog('下载成功:' + path, 'success')
def writeSourcesListLock(lock, srcUrl):
global resourcePath
lock.acquire()
with open(resourcePath, 'a+', encoding="UTF-8") as f:
f.write(srcUrl.strip() + '\n')
f.flush()
f.close()
lock.release()
def writeSourcesList(srcUrl):
global resourcePath
with open(resourcePath, 'a+', encoding="UTF-8") as f:
f.write(srcUrl.strip() + '\n')
f.flush()
f.close()
def updateDate():
text_list = []
# dateNow = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) # 获取系统时间,在github action上会是其他时区
tz = pytz.timezone('Asia/Shanghai') # 东八区
dateNow = datetime.datetime.fromtimestamp(int(time.time()), tz).strftime('%Y-%m-%d %H:%M:%S %Z%z')
with open(readMePath, 'r', encoding="UTF-8") as f:
for lineTmp in f.readlines():
if re.search('自动更新时间', lineTmp):
printLog('旧的: \t' + lineTmp, 'pink')
lineTmp = '**自动更新时间** ' + dateNow + '\n'
text_list.append(lineTmp)
else:
text_list.append(lineTmp)
with open(readMePath, 'w+', encoding="UTF-8") as f2:
for text in text_list:
f2.write(text)
f2.flush()
if __name__ == '__main__':
startWork() | null |
161,907 |
def downloadResource2(path, srcUrl):
tmpList = srcUrl.split('/')
tmp = tmpList[len(tmpList) - 1]
if re.search('%', tmp):
printLog('发现非法字符,进行urldecode', 'warn')
tmp = urllib.parse.unquote(tmp)
if not checkxbsName(tmp):
path = path + tmp
printLog('开始下载: ' + srcUrl.strip())
ssl._create_default_https_context = ssl._create_unverified_context
index = 0
while True:
try:
r = requests.get(srcUrl, stream=True)
with open(path, 'wb') as f:
for ch in r:
f.write(ch)
except Exception as e:
printLog(srcUrl + ' 下载出错,重试', 'warn')
index += 1  # count failed attempts
if index > 5:
printLog('已重试达到最大次数,停止!', 'error')
break  # give up after the max number of retries
time.sleep(1)
continue  # retry the download
break  # success, leave the retry loop
printLog('下载成功:' + path, 'success') | null |
161,908 |
The provided code snippet includes necessary dependencies for implementing the `download_progress_hook` function. Write a Python function `def download_progress_hook(blocknum, blocksize, totalsize)` to solve the following problem:
* 用于urllib.request.urlretrieve方法的回调函数,显示下载进度 @ blocknum:当前已经下载的块 @ blocksize:每次传输的块大小 @ totalsize:网页文件总大小
Here is the function:
def download_progress_hook(blocknum, blocksize, totalsize):
"""
* 用于urllib.request.urlretrieve方法的回调函数,显示下载进度
@ blocknum:当前已经下载的块
@ blocksize:每次传输的块大小
@ totalsize:网页文件总大小
"""
if totalsize == 0:
percent = 0
else:
percent = blocknum * blocksize / totalsize
if percent > 1.0:
percent = 1.0
percent = percent * 100
# 打印下载的百分比
printLog("download %s : %.4f%%" % (percent), 'pink') | * 用于urllib.request.urlretrieve方法的回调函数,显示下载进度 @ blocknum:当前已经下载的块 @ blocksize:每次传输的块大小 @ totalsize:网页文件总大小 |
161,909 |
def writeSourcesList(srcUrl):
global resourcePath
with open(resourcePath, 'a+', encoding="UTF-8") as f:
f.write(srcUrl.strip() + '\n')
f.flush()
f.close() | null |
161,910 | import requests
from bs4 import BeautifulSoup
import json
import os
from datetime import datetime, timedelta
import re
import urllib3
import urllib.parse
import shutil
import pytz
import time
urls = [
'https://www.yckceo.com/yuedu/shuyuan/index.html',
'https://www.yckceo.com/yuedu/shuyuans/index.html',
]
def parse_page(url):
response = requests.get(url, verify=True)
if response.status_code != 200:
print(f'Access {url}: {response.status_code}')
return []
soup = BeautifulSoup(response.text, 'html.parser')
relevant_links = []
today = datetime.today().date()
for div in soup.find_all('div', class_='layui-col-xs12 layui-col-sm6 layui-col-md4'):
link = div.find('a', href=True)
date_element = div.find('p', class_='m-right')
if link and date_element:
href = link['href']
link_date_str = date_element.text.strip()
match = re.search(r'(\d+)(天前|小时前|分钟前)', link_date_str)
if match:
value, unit = match.group(1, 2)
if unit == '分钟前':
days_ago = 1
elif unit == '小时前':
days_ago = 1
else:
days_ago = int(value)
link_date = today - timedelta(days=days_ago)
time_range = time_ranges.get(url, (0, float('inf')))
if time_range[0] <= days_ago <= time_range[1]:
json_url = f'https://www.yckceo.com{href.replace("content", "json")}'
relevant_links.append((json_url, link_date))
else:
# Try to parse the date in the format MM/DD HH:MM
try:
date_format = "%m/%d %H:%M"
link_date = datetime.strptime(link_date_str, date_format)
link_date = link_date.replace(year=today.year) # Assume the year is the same as today's year
# Check if the link is within the specified time range for the current URL
time_range = time_ranges.get(url, (0, float('inf')))
if today - link_date.date() <= timedelta(days=time_range[1]):
json_url = f'https://www.yckceo.com{href.replace("content", "json")}'
relevant_links.append((json_url, link_date.date()))
except ValueError as e:
print(f"Error parsing date for {url}: {e}")
return relevant_links
def download_json(url, output_base_dir=''):
final_url = get_redirected_url(url)
if final_url:
print(f"Real URL: {final_url}")
json_url = final_url.replace('.html', '.json')
response = requests.get(json_url, verify=True)
if response.status_code == 200:
try:
json_content = response.json()
id = json_url.split('/')[-1].split('.')[0]
filename = os.path.basename(urllib.parse.urlparse(json_url).path)
output_dir = 'shuyuan_data' if 'shuyuan' in json_url else 'shuyuans_data'
output_path = os.path.join(output_base_dir, output_dir, filename)
os.makedirs(os.path.join(output_base_dir, output_dir), exist_ok=True)
with open(output_path, 'w') as f:
json.dump(json_content, f, indent=2, ensure_ascii=False)
print(f"Downloaded {filename} to {output_base_dir}/{output_dir}")
# Now you can use the original URL for further processing
print(f"Download URL: {url}")
except json.JSONDecodeError as e:
print(f"Error decoding JSON for {json_url}: {e}")
print(f"Response Content: {response.text}")
else:
print(f"Error downloading {json_url}: Status code {response.status_code}")
print(f"Response Content: {response.text}")
else:
print(f"Error getting redirected URL for {url}")
def clean_old_files(directory='', root_dir=''):
directory = directory or os.getcwd()
full_path = os.path.abspath(os.path.join(root_dir, directory))
try:
if os.path.exists(full_path):
for filename in os.listdir(full_path):
file_path = os.path.join(full_path, filename)
try:
if os.path.isfile(file_path):
os.remove(file_path)
print(f"Deleted file: {file_path}")
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
print(f"Deleted directory: {file_path}")
except Exception as e:
print(f"Error deleting {file_path}: {e}")
print(f"Successfully cleaned old files in {full_path}")
else:
print(f"Directory does not exist: {full_path}")
except OSError as e:
print(f"Unable to clean old files in {full_path}: {e}")
def beautify_json_files(directory='', root_dir=''):
directory = directory or os.getcwd()
full_path = os.path.join(root_dir, directory)
try:
if os.path.isfile(full_path):
beautify_json_file(full_path)
print(f"成功美化 JSON 文件: {full_path}")
elif os.path.isdir(full_path):
for filename in os.listdir(full_path):
if filename.endswith('.json'):
file_path = os.path.join(full_path, filename)
beautify_json_file(file_path)
print(f"成功美化 JSON 文件: {file_path}")
print(f"成功美化目录中的所有 JSON 文件: {full_path}")
else:
print(f"无效路径: {full_path}")
except OSError as e:
print(f"无法美化 JSON 文件:{full_path},错误信息:{e}")
def merge_json_files(input_dir='', output_file='merged.json', root_dir=''):
input_dir = os.path.join(root_dir, input_dir)
if input_dir and not os.path.exists(input_dir):
os.makedirs(input_dir)
clean_old_files(directory='shuyuan_data', root_dir=root_dir)
clean_old_files(directory='shuyuans_data', root_dir=root_dir)
for page_url in urls:
for json_url, _ in parse_page(page_url):
download_json(json_url, output_base_dir=root_dir)
print(f"Processed URL: {json_url}")
for dir_name in ['shuyuan_data', 'shuyuans_data']:
dir_path = os.path.join(root_dir, dir_name)
if not os.path.exists(dir_path):
print(f"Folder does not exist: {dir_path}")
continue
all_data = []
for filename in os.listdir(dir_path):
if filename.endswith('.json'):
with open(os.path.join(dir_path, filename)) as f:
data = json.load(f)
all_data.extend(data)
output_path = os.path.join(root_dir, f"{dir_name}.json")
with open(output_path, 'w') as f:
f.write(json.dumps(all_data, indent=2, ensure_ascii=False))
print(f"合并的数据保存到 {output_path}")
beautify_json_files(f"{dir_name}.json", root_dir) | null |
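A self-contained sketch of the relative-date handling inside `parse_page`: labels like "3天前"/"5小时前"/"10分钟前" map to a days-ago count, with sub-day units collapsing to 1, mirroring the branch above:

import re
from datetime import datetime, timedelta

def days_ago_from_label(label):
    match = re.search(r'(\d+)(天前|小时前|分钟前)', label)
    if not match:
        return None
    value, unit = match.group(1, 2)
    return int(value) if unit == '天前' else 1  # hours/minutes count as one day

today = datetime.today().date()
print(today - timedelta(days=days_ago_from_label('3天前')))    # three days back
print(today - timedelta(days=days_ago_from_label('5小时前')))  # treated as 1 day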
161,911 | import requests
from bs4 import BeautifulSoup
import json
import os
from datetime import datetime, timedelta
import re
import urllib3
import urllib.parse
import shutil
import pytz
import time
def updateDate(readMePath):
text_list = []
tz = pytz.timezone('Asia/Shanghai') # 东八区
dateNow = datetime.fromtimestamp(int(time.time()), tz).strftime('%Y-%m-%d %H:%M:%S %Z%z')
with open(readMePath, 'r', encoding="UTF-8") as f:
for lineTmp in f.readlines():
if re.search('自动更新时间', lineTmp):
print('旧的: \t' + lineTmp)
lineTmp = '**自动更新时间** ' + dateNow + '\n'
text_list.append(lineTmp)
else:
text_list.append(lineTmp)
with open(readMePath, 'w+', encoding="UTF-8") as f2:
for text in text_list:
f2.write(text) | null |
161,912 | import requests
from bs4 import BeautifulSoup
import json
import os
from datetime import datetime, timedelta
import re
import urllib3
import urllib.parse
import shutil
import pytz
import time
def merge_book_json(root_dir=''):
shuyuan_data_path = os.path.join(root_dir, 'shuyuan_data.json')
shuyuans_data_path = os.path.join(root_dir, 'shuyuans_data.json')
book_path = os.path.join(root_dir, 'book.json')
try:
with open(shuyuan_data_path, 'r') as shuyuan_file, open(shuyuans_data_path, 'r') as shuyuans_file:
shuyuan_data = json.load(shuyuan_file)
shuyuans_data = json.load(shuyuans_file)
book_data = shuyuan_data + shuyuans_data
with open(book_path, 'w') as book_file:
json.dump(book_data, book_file, indent=2, ensure_ascii=False)
print(f"合并的数据保存到 {book_path}")
except Exception as e:
print(f"合并JSON文件时发生错误:{e}") | null |
161,913 | import json
import requests
def download_remote_file(remote_url, local_path):
response = requests.get(remote_url)
if response.status_code == 200:
with open(local_path, 'wb') as local_file:
local_file.write(response.content) | null |
161,914 | import json
import requests
def merge_libraries(library1, library2):
apps1 = library1.get('apps', [])
apps2 = library2.get('apps', [])
merged_apps = apps1 + apps2
merged_library = library1.copy()
merged_library['apps'] = merged_apps
return merged_library | null |
161,915 | import os
import json
def format_and_convert_unicode(input_folder, output_folder, replace_original=False):
# 确保输出文件夹存在
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# 遍历输入文件夹中的所有文件
for filename in os.listdir(input_folder):
if filename.endswith(".json"):
input_file = os.path.join(input_folder, filename)
output_file = os.path.join(output_folder, filename)
with open(input_file, 'r') as file:
json_data = file.read()
parsed_json = json.loads(json_data)
formatted_json = json.dumps(parsed_json, indent=4, ensure_ascii=False)
with open(output_file, 'w', encoding='utf-8') as file:
file.write(formatted_json)
print("已成功转换Unicode并美化保存文件:", output_file)
if replace_original:
os.remove(input_file)
os.rename(output_file, input_file)
print("已替换原始文件:", input_file) | null |
161,916 | import re
import requests
def should_exclude_channel(channel_name):
# 判断是否要排除特定类型的频道,可以根据需要进行修改
excluded_keywords = ['台湾女歌手龙飘飘珍藏版HD', '湖南-凤凰古城', '香港佛陀']
for keyword in excluded_keywords:
if keyword in channel_name:
return True
return False
def get_channel_type_header(channel_name):
if 'CCTV' in channel_name:
return '央视频道'
elif '卫视' in channel_name:
return '卫视频道'
elif '香港' in channel_name or '澳门' in channel_name or '凤凰' in channel_name:
return '港澳频道'
else:
return ''
with open(output_file_path, 'r', encoding='utf-8') as file:
content = file.read()
with open(output_file_path, 'w', encoding='utf-8') as file:
file.write(content)
def extract_tv_links_from_url(url, output_file_path):
try:
response = requests.get(url)
response.raise_for_status()
text = response.text
with open(output_file_path, 'w', encoding='utf-8') as output_file:
pattern_ipv6 = re.compile(r'\[.*?\]')
text = re.sub(pattern_ipv6, '', text)
pattern = re.compile(r'([^,]+(?:CCTV|卫视|香港|澳门|台湾|凤凰)[^\n,]*[^,]*),(https://[^\s]+)')
matches = re.findall(pattern, text)
channel_types_written = set()
if matches:
for tv_channel, link in matches:
if not should_exclude_channel(tv_channel):
header = get_channel_type_header(tv_channel)
if header and header not in channel_types_written:
output_file.write(f'{header},#genre#\n')
channel_types_written.add(header)
output_file.write(f'{tv_channel},{link}\n')
print(f'Successfully extracted and saved TV links.')
else:
print('No TV links found.')
except Exception as e:
print(f'Error: {e}') | null |
161,917 | import os
from setuptools import setup
def get_requirements():
with open("./requirements.txt") as reqsf:
reqs = reqsf.readlines()
return reqs | null |
161,918 | import torch
The provided code snippet includes necessary dependencies for implementing the `apply_masks` function. Write a Python function `def apply_masks(x, masks, concat=True)` to solve the following problem:
:param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)] :param masks: list of tensors of shape [B, K] containing indices of K patches in [N] to keep
Here is the function:
def apply_masks(x, masks, concat=True):
"""
:param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
:param masks: list of tensors of shape [B, K] containing indices of K patches in [N] to keep
"""
all_x = []
for m in masks:
mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
all_x += [torch.gather(x, dim=1, index=mask_keep)]
if not concat:
return all_x
return torch.cat(all_x, dim=0) | :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)] :param masks: list of tensors of shape [B, K] containing indices of K patches in [N] to keep |
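A quick shape check for `apply_masks`, assuming the function defined above is in scope: keep K=2 of N=4 patches per sample, giving a [B, K, D] output (with a single mask, `concat=True` just returns that one gathered tensor):

import torch

B, N, D = 3, 4, 5
x = torch.randn(B, N, D)
masks = [torch.tensor([[0, 2]] * B)]  # indices of the K patches to keep, shape [B, K]
out = apply_masks(x, masks)           # defined above
assert out.shape == (B, 2, D)
assert torch.equal(out[0], x[0, [0, 2]])  # first sample keeps patches 0 and 2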
161,919 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
The provided code snippet includes necessary dependencies for implementing the `random_short_side_scale_jitter` function. Write a Python function `def random_short_side_scale_jitter( images, min_size, max_size, boxes=None, inverse_uniform_sampling=False )` to solve the following problem:
Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (tensor): images to perform scale jitter. Dimension is `num frames` x `channel` x `height` x `width`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (ndarray): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: (tensor): the scaled images with dimension of `num frames` x `channel` x `new height` x `new width`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4.
Here is the function:
def random_short_side_scale_jitter(
images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
"""
Perform a spatial short scale jittering on the given images and
corresponding boxes.
Args:
images (tensor): images to perform scale jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
min_size (int): the minimal size to scale the frames.
max_size (int): the maximal size to scale the frames.
boxes (ndarray): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale, max_scale].
Returns:
(tensor): the scaled images with dimension of
`num frames` x `channel` x `new height` x `new width`.
(ndarray or None): the scaled boxes with dimension of
`num boxes` x 4.
"""
if inverse_uniform_sampling:
size = int(
round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
)
else:
size = int(round(np.random.uniform(min_size, max_size)))
height = images.shape[2]
width = images.shape[3]
if (width <= height and width == size) or (
height <= width and height == size
):
return images, boxes
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
if boxes is not None:
boxes = boxes * float(new_height) / height
else:
new_width = int(math.floor((float(width) / height) * size))
if boxes is not None:
boxes = boxes * float(new_width) / width
return (
torch.nn.functional.interpolate(
images,
size=(new_height, new_width),
mode='bilinear',
align_corners=False,
),
boxes,
) | Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (tensor): images to perform scale jitter. Dimension is `num frames` x `channel` x `height` x `width`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (ndarray): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: (tensor): the scaled images with dimension of `num frames` x `channel` x `new height` x `new width`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4. |
161,920 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def crop_boxes(boxes, x_offset, y_offset):
"""
Perform crop on the bounding boxes given the offsets.
Args:
boxes (ndarray or None): bounding boxes to perform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
The provided code snippet includes necessary dependencies for implementing the `random_crop` function. Write a Python function `def random_crop(images, size, boxes=None)` to solve the following problem:
Perform random spatial crop on the given images and corresponding boxes. Args: images (tensor): images to perform random crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): the size of height and width to crop on the image. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): cropped images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
Here is the function:
def random_crop(images, size, boxes=None):
"""
Perform random spatial crop on the given images and corresponding boxes.
Args:
images (tensor): images to perform random crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): the size of height and width to crop on the image.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (tensor): cropped images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
if images.shape[2] == size and images.shape[3] == size:
return images, boxes
height = images.shape[2]
width = images.shape[3]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = images[
:, :, y_offset:y_offset + size, x_offset:x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
return cropped, cropped_boxes | Perform random spatial crop on the given images and corresponding boxes. Args: images (tensor): images to perform random crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): the size of height and width to crop on the image. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): cropped images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. |
161,921 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
The provided code snippet includes necessary dependencies for implementing the `horizontal_flip` function. Write a Python function `def horizontal_flip(prob, images, boxes=None)` to solve the following problem:
Perform horizontal flip on the given images and corresponding boxes. Args: prob (float): probability to flip the images. images (tensor): images to perform horizontal flip, the dimension is `num frames` x `channel` x `height` x `width`. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: images (tensor): images with dimension of `num frames` x `channel` x `height` x `width`. flipped_boxes (ndarray or None): the flipped boxes with dimension of `num boxes` x 4.
Here is the function:
def horizontal_flip(prob, images, boxes=None):
"""
Perform horizontal flip on the given images and corresponding boxes.
Args:
prob (float): probability to flip the images.
images (tensor): images to perform horizontal flip, the dimension is
`num frames` x `channel` x `height` x `width`.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
images (tensor): images with dimension of
`num frames` x `channel` x `height` x `width`.
flipped_boxes (ndarray or None): the flipped boxes with dimension of
`num boxes` x 4.
"""
if boxes is None:
flipped_boxes = None
else:
flipped_boxes = boxes.copy()
if np.random.uniform() < prob:
images = images.flip((-1))
if len(images.shape) == 3:
width = images.shape[2]
elif len(images.shape) == 4:
width = images.shape[3]
else:
raise NotImplementedError("Dimension not supported")
if boxes is not None:
flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
return images, flipped_boxes | Perform horizontal flip on the given images and corresponding boxes. Args: prob (float): probability to flip the images. images (tensor): images to perform horizontal flip, the dimension is `num frames` x `channel` x `height` x `width`. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: images (tensor): images with dimension of `num frames` x `channel` x `height` x `width`. flipped_boxes (ndarray or None): the flipped boxes with dimension of `num boxes` x 4. |
161,922 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def crop_boxes(boxes, x_offset, y_offset):
"""
Perform crop on the bounding boxes given the offsets.
Args:
boxes (ndarray or None): bounding boxes to perform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
The provided code snippet includes necessary dependencies for implementing the `uniform_crop` function. Write a Python function `def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None)` to solve the following problem:
Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and width to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optional. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
Here is the function:
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode='bilinear',
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset:y_offset + size, x_offset:x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
    return cropped, cropped_boxes | Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and width to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optional. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
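A minimal usage sketch for `uniform_crop`, with values chosen so the offsets are easy to verify by hand:

import numpy as np
import torch

clip = torch.rand(8, 3, 32, 48)              # height 32, width 48
boxes = np.array([[10.0, 10.0, 20.0, 20.0]])
cropped, cropped_boxes = uniform_crop(clip, size=16, spatial_idx=1, boxes=boxes)
assert cropped.shape == (8, 3, 16, 16)
# Center crop: y_offset = ceil((32 - 16) / 2) = 8 and x_offset = ceil((48 - 16) / 2) = 16,
# so boxes are shifted by (-16, -8) and may need clip_boxes_to_image afterwards.
assert np.allclose(cropped_boxes[0], [10 - 16, 10 - 8, 20 - 16, 20 - 8])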
161,923 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
The provided code snippet includes necessary dependencies for implementing the `clip_boxes_to_image` function. Write a Python function `def clip_boxes_to_image(boxes, height, width)` to solve the following problem:
Clip an array of boxes to an image with the given height and width. Args: boxes (ndarray): bounding boxes to perform clipping. Dimension is `num boxes` x 4. height (int): given image height. width (int): given image width. Returns: clipped_boxes (ndarray): the clipped boxes with dimension of `num boxes` x 4.
Here is the function:
def clip_boxes_to_image(boxes, height, width):
"""
Clip an array of boxes to an image with the given height and width.
Args:
boxes (ndarray): bounding boxes to perform clipping.
Dimension is `num boxes` x 4.
height (int): given image height.
width (int): given image width.
Returns:
clipped_boxes (ndarray): the clipped boxes with dimension of
`num boxes` x 4.
"""
clipped_boxes = boxes.copy()
clipped_boxes[:, [0, 2]] = np.minimum(
width - 1.0, np.maximum(0.0, boxes[:, [0, 2]])
)
clipped_boxes[:, [1, 3]] = np.minimum(
height - 1.0, np.maximum(0.0, boxes[:, [1, 3]])
)
return clipped_boxes | Clip an array of boxes to an image with the given height and width. Args: boxes (ndarray): bounding boxes to perform clipping. Dimension is `num boxes` x 4. height (int): given image height. width (int): given image width. Returns: clipped_boxes (ndarray): the clipped boxes with dimension of `num boxes` x 4. |
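A minimal sketch showing `clip_boxes_to_image` clamping a box that overhangs a 16x16 image:

import numpy as np

boxes = np.array([[-6.0, 2.0, 4.0, 20.0]])
clipped = clip_boxes_to_image(boxes, height=16, width=16)
# x is clamped to [0, width - 1] and y to [0, height - 1]:
assert np.allclose(clipped[0], [0.0, 2.0, 4.0, 15.0])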
161,924 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def brightness_jitter(var, images):
"""
    Perform brightness jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for brightness.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_bright = torch.zeros(images.shape)
images = blend(images, img_bright, alpha)
return images
def contrast_jitter(var, images):
"""
    Perform contrast jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for contrast.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
images = blend(images, img_gray, alpha)
return images
def saturation_jitter(var, images):
"""
    Perform saturation jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for saturation.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
images = blend(images, img_gray, alpha)
return images
The provided code snippet includes necessary dependencies for implementing the `color_jitter` function. Write a Python function `def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0)` to solve the following problem:
Perform color jittering on the input images. The channels of images should be in order BGR. Args: images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`.
Here is the function:
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
"""
    Perform color jittering on the input images. The channels of images
should be in order BGR.
Args:
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
img_brightness (float): jitter ratio for brightness.
img_contrast (float): jitter ratio for contrast.
img_saturation (float): jitter ratio for saturation.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
jitter = []
if img_brightness != 0:
jitter.append('brightness')
if img_contrast != 0:
jitter.append('contrast')
if img_saturation != 0:
jitter.append('saturation')
if len(jitter) > 0:
order = np.random.permutation(np.arange(len(jitter)))
for idx in range(0, len(jitter)):
if jitter[order[idx]] == 'brightness':
images = brightness_jitter(img_brightness, images)
elif jitter[order[idx]] == 'contrast':
images = contrast_jitter(img_contrast, images)
elif jitter[order[idx]] == 'saturation':
images = saturation_jitter(img_saturation, images)
    return images | Perform color jittering on the input images. The channels of images should be in order BGR. Args: images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`.
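A minimal usage sketch for `color_jitter` (the `blend` and `grayscale` helpers it relies on are assumed to be defined earlier in this module):

import torch

clip = torch.rand(8, 3, 32, 32)  # channels in BGR order, per the docstring
jittered = color_jitter(clip, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4)
assert jittered.shape == clip.shape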
161,925 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
The provided code snippet includes necessary dependencies for implementing the `lighting_jitter` function. Write a Python function `def lighting_jitter(images, alphastd, eigval, eigvec)` to solve the following problem:
Perform AlexNet-style PCA jitter on the given images. Args: images (tensor): images to perform lighting jitter. Dimension is `num frames` x `channel` x `height` x `width`. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`.
Here is the function:
def lighting_jitter(images, alphastd, eigval, eigvec):
"""
Perform AlexNet-style PCA jitter on the given images.
Args:
images (tensor): images to perform lighting jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
alphastd (float): jitter ratio for PCA jitter.
eigval (list): eigenvalues for PCA jitter.
eigvec (list[list]): eigenvectors for PCA jitter.
Returns:
out_images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if alphastd == 0:
return images
# generate alpha1, alpha2, alpha3.
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1,
)
out_images = torch.zeros_like(images)
if len(images.shape) == 3:
# C H W
channel_dim = 0
elif len(images.shape) == 4:
# T C H W
channel_dim = 1
else:
raise NotImplementedError(f'Unsupported dimension {len(images.shape)}')
for idx in range(images.shape[channel_dim]):
# C H W
if len(images.shape) == 3:
out_images[idx] = images[idx] + rgb[2 - idx]
# T C H W
elif len(images.shape) == 4:
out_images[:, idx] = images[:, idx] + rgb[2 - idx]
else:
raise NotImplementedError(
f'Unsupported dimension {len(images.shape)}'
)
return out_images | Perform AlexNet-style PCA jitter on the given images. Args: images (tensor): images to perform lighting jitter. Dimension is `num frames` x `channel` x `height` x `width`. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. |
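A minimal usage sketch for `lighting_jitter`. The eigenvalues/eigenvectors below are the commonly cited ImageNet PCA statistics (e.g., from the fb.resnet.torch recipe); they are example values, not necessarily the statistics this codebase trains with:

import torch

eigval = [0.2175, 0.0188, 0.0045]
eigvec = [
    [-0.5675, 0.7192, 0.4009],
    [-0.5808, -0.0045, -0.8140],
    [-0.5836, -0.6948, 0.4203],
]
clip = torch.rand(8, 3, 32, 32)
jittered = lighting_jitter(clip, alphastd=0.1, eigval=eigval, eigvec=eigvec)
assert jittered.shape == clip.shape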
161,926 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
The provided code snippet includes necessary dependencies for implementing the `color_normalization` function. Write a Python function `def color_normalization(images, mean, stddev)` to solve the following problem:
Perform color normalization on the given images. Args: images (tensor): images to perform color normalization. Dimension is `num frames` x `channel` x `height` x `width`. mean (list): mean values for normalization. stddev (list): standard deviations for normalization. Returns: out_images (tensor): the normalized images, the dimension is `num frames` x `channel` x `height` x `width`.
Here is the function:
def color_normalization(images, mean, stddev):
"""
    Perform color normalization on the given images.
Args:
images (tensor): images to perform color normalization. Dimension is
`num frames` x `channel` x `height` x `width`.
mean (list): mean values for normalization.
stddev (list): standard deviations for normalization.
Returns:
        out_images (tensor): the normalized images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if len(images.shape) == 3:
assert (
len(mean) == images.shape[0]
), 'channel mean not computed properly'
assert (
len(stddev) == images.shape[0]
), 'channel stddev not computed properly'
elif len(images.shape) == 4:
assert (
len(mean) == images.shape[1]
), 'channel mean not computed properly'
assert (
len(stddev) == images.shape[1]
), 'channel stddev not computed properly'
else:
raise NotImplementedError(f'Unsupported dimension {len(images.shape)}')
out_images = torch.zeros_like(images)
for idx in range(len(mean)):
# C H W
if len(images.shape) == 3:
out_images[idx] = (images[idx] - mean[idx]) / stddev[idx]
elif len(images.shape) == 4:
out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
else:
raise NotImplementedError(
f'Unsupported dimension {len(images.shape)}'
)
    return out_images | Perform color normalization on the given images. Args: images (tensor): images to perform color normalization. Dimension is `num frames` x `channel` x `height` x `width`. mean (list): mean values for normalization. stddev (list): standard deviations for normalization. Returns: out_images (tensor): the normalized images, the dimension is `num frames` x `channel` x `height` x `width`.
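A minimal usage sketch for `color_normalization` (0.45 / 0.225 are typical video-dataset statistics, used here purely as examples):

import torch

clip = torch.rand(8, 3, 32, 32)
out = color_normalization(clip, mean=[0.45, 0.45, 0.45], stddev=[0.225, 0.225, 0.225])
assert out.shape == clip.shape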
161,927 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
The provided code snippet includes necessary dependencies for implementing the `random_resized_crop` function. Write a Python function `def random_resized_crop( images, target_height, target_width, scale=(0.8, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), )` to solve the following problem:
Crop the given images to random size and aspect ratio. A crop of random size (default: 0.8 to 1.0, per the `scale` argument) of the original size and a random aspect ratio (default: 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to the given size. This is popularly used to train the Inception networks. Args: images: Images to perform resizing and cropping. target_height: Desired height after cropping. target_width: Desired width after cropping. scale: Scale range of Inception-style area based random resizing. ratio: Aspect ratio range of Inception-style area based random resizing.
Here is the function:
def random_resized_crop(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
    Crop the given images to random size and aspect ratio. A crop of random
    size (default: 0.8 to 1.0, per the `scale` argument) of the original size
    and a random aspect ratio (default: 3/4 to 4/3) of the original aspect
    ratio is made. This crop is finally resized to the given size. This is
    popularly used to train the Inception networks.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
cropped = images[:, :, i:i + h, j:j + w]
return torch.nn.functional.interpolate(
cropped,
size=(target_height, target_width),
mode='bilinear',
align_corners=False,
    ) | Crop the given images to random size and aspect ratio. A crop of random size (default: 0.8 to 1.0, per the `scale` argument) of the original size and a random aspect ratio (default: 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to the given size. This is popularly used to train the Inception networks. Args: images: Images to perform resizing and cropping. target_height: Desired height after cropping. target_width: Desired width after cropping. scale: Scale range of Inception-style area based random resizing. ratio: Aspect ratio range of Inception-style area based random resizing.
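A minimal usage sketch for `random_resized_crop` on a `num frames` x `channel` x `height` x `width` clip:

import torch

clip = torch.rand(8, 3, 128, 160)
out = random_resized_crop(clip, target_height=112, target_width=112)
assert out.shape == (8, 3, 112, 112)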
161,928 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
The provided code snippet includes necessary dependencies for implementing the `random_resized_crop_with_shift` function. Write a Python function `def random_resized_crop_with_shift( images, target_height, target_width, scale=(0.8, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), )` to solve the following problem:
This is similar to random_resized_crop. However, it samples two different boxes (for cropping) for the first and last frame. It then linearly interpolates the two boxes for other frames. Args: images: Images to perform resizing and cropping. target_height: Desired height after cropping. target_width: Desired width after cropping. scale: Scale range of Inception-style area based random resizing. ratio: Aspect ratio range of Inception-style area based random resizing.
Here is the function:
def random_resized_crop_with_shift(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
This is similar to random_resized_crop. However, it samples two different
boxes (for cropping) for the first and last frame. It then linearly
interpolates the two boxes for other frames.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
t = images.shape[1]
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)
i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()]
j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()]
h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()]
w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()]
out = torch.zeros((3, t, target_height, target_width))
for ind in range(t):
out[:, ind:ind + 1, :, :] = torch.nn.functional.interpolate(
images[
:,
ind:ind + 1,
i_s[ind]:i_s[ind] + h_s[ind],
j_s[ind]:j_s[ind] + w_s[ind],
],
size=(target_height, target_width),
mode='bilinear',
align_corners=False,
)
return out | This is similar to random_resized_crop. However, it samples two different boxes (for cropping) for the first and last frame. It then linearly interpolates the two boxes for other frames. Args: images: Images to perform resizing and cropping. target_height: Desired height after cropping. target_width: Desired width after cropping. scale: Scale range of Inception-style area based random resizing. ratio: Aspect ratio range of Inception-style area based random resizing. |
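A minimal usage sketch for `random_resized_crop_with_shift`. Note that the expected layout here is `channel` x `num frames` x `height` x `width` (t is read from dim 1 and the output is allocated as (3, t, H, W)), which differs from `random_resized_crop` above:

import torch

clip = torch.rand(3, 8, 128, 160)
out = random_resized_crop_with_shift(clip, target_height=112, target_width=112)
assert out.shape == (3, 8, 112, 112)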
161,929 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
return Image.BILINEAR
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
import re

def rand_augment_transform(config_str, hparams):
"""
RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
Create a RandAugment transform
:param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, which are not order specific, determine
'm' - integer magnitude of rand augment
'n' - integer num layers (number of transform ops selected per image)
    'w' - integer probability weight index (index of a set of weights to influence choice of op)
'mstd' - float std deviation of magnitude noise applied
'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
:param hparams: Other hparams (kwargs) for the RandAugmentation scheme
:return: A PyTorch compatible Transform
"""
magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10)
num_layers = 2 # default to 2 ops per image
weight_idx = None # default to no probability weights for op choice
transforms = _RAND_TRANSFORMS
config = config_str.split("-")
assert config[0] == "rand"
config = config[1:]
for c in config:
cs = re.split(r"(\d.*)", c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == "mstd":
# noise param injected via hparams for now
hparams.setdefault("magnitude_std", float(val))
elif key == "inc":
if bool(val):
transforms = _RAND_INCREASING_TRANSFORMS
elif key == "m":
magnitude = int(val)
elif key == "n":
num_layers = int(val)
elif key == "w":
weight_idx = int(val)
else:
            raise NotImplementedError(f"Unknown RandAugment config section: {key}")
ra_ops = rand_augment_ops(
magnitude=magnitude, hparams=hparams, transforms=transforms
)
choice_weights = (
None if weight_idx is None else _select_rand_weights(weight_idx)
)
return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
The provided code snippet includes necessary dependencies for implementing the `create_random_augment` function. Write a Python function `def create_random_augment( input_size, auto_augment=None, interpolation='bilinear', )` to solve the following problem:
Get video randaug transform. Args: input_size: The size of the input video in tuple. auto_augment: Parameters for randaug. An example: "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number of operations to apply). interpolation: Interpolation method.
Here is the function:
def create_random_augment(
input_size,
auto_augment=None,
interpolation='bilinear',
):
"""
Get video randaug transform.
Args:
input_size: The size of the input video in tuple.
auto_augment: Parameters for randaug. An example:
"rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number
of operations to apply).
interpolation: Interpolation method.
"""
if isinstance(input_size, tuple):
img_size = input_size[-2:]
else:
img_size = input_size
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, tuple):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = {'translate_const': int(img_size_min * 0.45)}
if interpolation and interpolation != 'random':
aa_params['interpolation'] = _pil_interp(interpolation)
if auto_augment.startswith('rand'):
return transforms.Compose(
[rand_augment_transform(auto_augment, aa_params)]
)
raise NotImplementedError | Get video randaug transform. Args: input_size: The size of the input video in tuple. auto_augment: Parameters for randaug. An example: "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number of operations to apply). interpolation: Interpolation method. |
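A minimal usage sketch for `create_random_augment` (assuming, as in the timm implementation this code mirrors, that the returned transform operates on PIL images):

from PIL import Image

aug = create_random_augment(
    input_size=(224, 224),
    auto_augment='rand-m7-n4-mstd0.5-inc1',
    interpolation='bicubic',
)
frame = Image.new('RGB', (224, 224))
augmented = aug(frame)  # a randomly augmented PIL image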
161,930 | import math
import numpy as np
import random
import numbers
import PIL
from PIL import Image
import torch
import torchvision
import torchvision.transforms.functional as F
from torchvision import transforms
import src.datasets.utils.video.functional as FF
from src.datasets.utils.video.randaugment import rand_augment_transform
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
The provided code snippet includes necessary dependencies for implementing the `random_sized_crop_img` function. Write a Python function `def random_sized_crop_img( im, size, jitter_scale=(0.08, 1.0), jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), max_iter=10, )` to solve the following problem:
Performs Inception-style cropping (used for training).
Here is the function:
def random_sized_crop_img(
im,
size,
jitter_scale=(0.08, 1.0),
jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
max_iter=10,
):
"""
Performs Inception-style cropping (used for training).
"""
assert (
len(im.shape) == 3
    ), 'Currently only single images are supported for random_sized_crop'
h, w = im.shape[1:3]
i, j, h, w = _get_param_spatial_crop(
scale=jitter_scale,
ratio=jitter_aspect,
height=h,
width=w,
num_repeat=max_iter,
log_scale=False,
switch_hw=True,
)
cropped = im[:, i:i + h, j:j + w]
return torch.nn.functional.interpolate(
cropped.unsqueeze(0),
size=(size, size),
mode='bilinear',
align_corners=False,
).squeeze(0) | Performs Inception-style cropping (used for training). |
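A minimal usage sketch for `random_sized_crop_img` on a single (C, H, W) image tensor:

import torch

im = torch.rand(3, 256, 320)
out = random_sized_crop_img(im, size=224)
assert out.shape == (3, 224, 224)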
161,962 | import numpy as np
from PIL import Image
import torch
The provided code snippet includes necessary dependencies for implementing the `convert_img` function. Write a Python function `def convert_img(img)` to solve the following problem:
Converts an (H, W, C) numpy.ndarray to (C, H, W) format
Here is the function:
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
    return img | Converts an (H, W, C) numpy.ndarray to (C, H, W) format
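A minimal usage sketch for `convert_img`, covering both the color and grayscale paths:

import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)   # H x W x C
assert convert_img(frame).shape == (3, 240, 320)  # C x H x W
gray = np.zeros((240, 320), dtype=np.uint8)       # H x W
assert convert_img(gray).shape == (1, 240, 320)   # channel axis prepended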
161,963 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import (
trunc_normal_,
repeat_interleave_batch
)
from src.masks.utils import apply_masks
class VisionTransformerPredictor(nn.Module):
""" Vision Transformer """
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
embed_dim=768,
predictor_embed_dim=384,
depth=6,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
uniform_power=False,
use_mask_tokens=False,
num_mask_tokens=2,
zero_init_mask_tokens=True,
**kwargs
):
super().__init__()
# Map input to predictor dimension
self.predictor_embed = nn.Linear(embed_dim, predictor_embed_dim, bias=True)
# Mask tokens
self.mask_tokens = None
self.num_mask_tokens = 0
if use_mask_tokens:
self.num_mask_tokens = num_mask_tokens
self.mask_tokens = nn.ParameterList([
nn.Parameter(torch.zeros(1, 1, predictor_embed_dim))
for i in range(num_mask_tokens)
])
# Determine positional embedding
self.input_size = img_size
self.patch_size = patch_size
# --
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.is_video = num_frames > 1
grid_size = self.input_size // self.patch_size
grid_depth = self.num_frames // self.tubelet_size
if self.is_video:
self.num_patches = num_patches = (
(num_frames // tubelet_size)
* (img_size // patch_size)
* (img_size // patch_size)
)
else:
self.num_patches = num_patches = (
(img_size // patch_size)
* (img_size // patch_size)
)
# Position embedding
self.uniform_power = uniform_power
self.predictor_pos_embed = None
self.predictor_pos_embed = nn.Parameter(
torch.zeros(1, num_patches, predictor_embed_dim),
requires_grad=False)
# Attention Blocks
self.predictor_blocks = nn.ModuleList([
Block(
dim=predictor_embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
act_layer=nn.GELU,
attn_drop=attn_drop_rate,
grid_size=grid_size,
grid_depth=grid_depth,
norm_layer=norm_layer)
for i in range(depth)])
# Normalize & project back to input dimension
self.predictor_norm = norm_layer(predictor_embed_dim)
self.predictor_proj = nn.Linear(predictor_embed_dim, embed_dim, bias=True)
# ------ initialize weights
if self.predictor_pos_embed is not None:
self._init_pos_embed(self.predictor_pos_embed.data) # sincos pos-embed
self.init_std = init_std
if not zero_init_mask_tokens:
for mt in self.mask_tokens:
trunc_normal_(mt, std=init_std)
self.apply(self._init_weights)
self._rescale_blocks()
def _init_pos_embed(self, pos_embed):
embed_dim = pos_embed.size(-1)
grid_size = self.input_size // self.patch_size
if self.is_video:
grid_depth = self.num_frames // self.tubelet_size
sincos = get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=self.uniform_power
)
else:
sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)
pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def _rescale_blocks(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.predictor_blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def diffusion(self, x, noise_beta=(0.5, 1.0), steps=1000):
# Prepare diffusion noise schedule
b1, b2 = noise_beta
beta_scheduler = (b1 + i*(b2-b1)/steps for i in range(steps))
alpha_scheduler = []
_alpha = 1.0
for _beta in beta_scheduler:
_alpha *= 1.-_beta
alpha_scheduler += [_alpha]
# Sample diffusion time step
T = torch.randint(0, steps, (len(x),))
alpha = torch.tensor(alpha_scheduler, device=x.device)[T].unsqueeze(-1).unsqueeze(-1)
# Normalize features and apply noise
x = torch.nn.functional.layer_norm(x, (x.size(-1),))
x = alpha**0.5 * x + (1.-alpha)**0.5 * torch.randn(x.shape, device=x.device)
return x
def forward(self, ctxt, tgt, masks_ctxt, masks_tgt, mask_index=1):
"""
:param ctxt: context tokens
:param tgt: target tokens
:param masks_ctxt: indices of context tokens in input
        :param masks_tgt: indices of target tokens in input
"""
assert (masks_ctxt is not None) and (masks_tgt is not None), 'Cannot run predictor without mask indices'
if not isinstance(masks_ctxt, list):
masks_ctxt = [masks_ctxt]
if not isinstance(masks_tgt, list):
masks_tgt = [masks_tgt]
# Batch Size
B = len(ctxt) // len(masks_ctxt)
        # Map context tokens to predictor dimensions
x = self.predictor_embed(ctxt)
_, N_ctxt, D = x.shape
# Add positional embedding to ctxt tokens
if self.predictor_pos_embed is not None:
ctxt_pos_embed = self.predictor_pos_embed.repeat(B, 1, 1)
x += apply_masks(ctxt_pos_embed, masks_ctxt)
# Map target tokens to predictor dimensions & add noise (fwd diffusion)
if self.mask_tokens is None:
pred_tokens = self.predictor_embed(tgt)
pred_tokens = self.diffusion(pred_tokens)
else:
mask_index = mask_index % self.num_mask_tokens
pred_tokens = self.mask_tokens[mask_index]
pred_tokens = pred_tokens.repeat(B, self.num_patches, 1)
pred_tokens = apply_masks(pred_tokens, masks_tgt)
# Add positional embedding to target tokens
if self.predictor_pos_embed is not None:
pos_embs = self.predictor_pos_embed.repeat(B, 1, 1)
pos_embs = apply_masks(pos_embs, masks_tgt)
pos_embs = repeat_interleave_batch(pos_embs, B, repeat=len(masks_ctxt))
pred_tokens += pos_embs
# Concatenate context & target tokens
x = x.repeat(len(masks_tgt), 1, 1)
x = torch.cat([x, pred_tokens], dim=1)
# FIXME: this implementation currently assumes masks_ctxt and masks_tgt
        #        are aligned 1:1 (ok with MultiMask wrapper on predictor but
# otherwise will break)
masks_ctxt = torch.cat(masks_ctxt, dim=0)
masks_tgt = torch.cat(masks_tgt, dim=0)
masks = torch.cat([masks_ctxt, masks_tgt], dim=1)
# Fwd prop
for blk in self.predictor_blocks:
x = blk(x, mask=masks)
x = self.predictor_norm(x)
# Return output corresponding to target tokens
x = x[:, N_ctxt:]
x = self.predictor_proj(x)
return x
def vit_predictor(**kwargs):
model = VisionTransformerPredictor(
mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
return model | null |
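A minimal instantiation sketch for `vit_predictor` (hyperparameters chosen to pair with a ViT-B encoder, embed_dim=768; they are illustrative, not a published configuration). The mask-index tensors passed to `forward` depend on the masking strategy, so only construction is shown:

predictor = vit_predictor(
    img_size=224,
    patch_size=16,
    num_frames=16,
    tubelet_size=2,
    embed_dim=768,            # must match the encoder's output dimension
    predictor_embed_dim=384,
    depth=6,
    num_heads=12,
)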
161,964 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.num_heads = num_heads
self.out_layers = out_layers
self.input_size = img_size
self.patch_size = patch_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.is_video = num_frames > 1
grid_size = self.input_size // self.patch_size
grid_depth = self.num_frames // self.tubelet_size
# Tokenize pixels with convolution
if self.is_video:
self.patch_embed = PatchEmbed3D(
patch_size=patch_size,
tubelet_size=tubelet_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(num_frames // tubelet_size)
* (img_size // patch_size)
* (img_size // patch_size)
)
else:
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(img_size // patch_size)
* (img_size // patch_size)
)
# Position embedding
self.uniform_power = uniform_power
self.pos_embed = None
self.pos_embed = nn.Parameter(
torch.zeros(1, self.num_patches, embed_dim),
requires_grad=False)
# Attention Blocks
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
act_layer=nn.GELU,
grid_size=grid_size,
grid_depth=grid_depth,
attn_drop=attn_drop_rate,
norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# ------ initialize weights
if self.pos_embed is not None:
self._init_pos_embed(self.pos_embed.data) # sincos pos-embed
self.init_std = init_std
self.apply(self._init_weights)
self._rescale_blocks()
def _init_pos_embed(self, pos_embed):
embed_dim = pos_embed.size(-1)
grid_size = self.input_size // self.patch_size
if self.is_video:
grid_depth = self.num_frames // self.tubelet_size
sincos = get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=self.uniform_power
)
else:
sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)
pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv3d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _rescale_blocks(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def get_num_layers(self):
return len(self.blocks)
def no_weight_decay(self):
return {}
def forward(self, x, masks=None):
"""
:param x: input image/video
:param masks: indices of patch tokens to mask (remove)
"""
if masks is not None and not isinstance(masks, list):
masks = [masks]
# Tokenize input
pos_embed = self.pos_embed
if pos_embed is not None:
pos_embed = self.interpolate_pos_encoding(x, pos_embed)
x = self.patch_embed(x)
if pos_embed is not None:
x += pos_embed
B, N, D = x.shape
# Mask away unwanted tokens (if masks provided)
if masks is not None:
x = apply_masks(x, masks)
masks = torch.cat(masks, dim=0)
# Fwd prop
outs = []
for i, blk in enumerate(self.blocks):
x = blk(x, mask=masks)
if self.out_layers is not None and i in self.out_layers:
outs.append(self.norm(x))
if self.out_layers is not None:
return outs
if self.norm is not None:
x = self.norm(x)
return x
def interpolate_pos_encoding(self, x, pos_embed):
_, N, dim = pos_embed.shape
if self.is_video:
            # If pos_embed is already the correct size, just return
_, _, T, H, W = x.shape
if H == self.input_size and W == self.input_size and T == self.num_frames:
return pos_embed
# Convert depth, height, width of input to be measured in patches
# instead of pixels/frames
T = T // self.tubelet_size
H = H // self.patch_size
W = W // self.patch_size
# Compute the initialized shape of the positional embedding measured
# in patches
N_t = self.num_frames // self.tubelet_size
N_h = N_w = self.input_size // self.patch_size
assert N_h * N_w * N_t == N, 'Positional embedding initialized incorrectly'
# Compute scale factor for spatio-temporal interpolation
scale_factor = (T/N_t, H/N_h, W/N_w)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, N_t, N_h, N_w, dim).permute(0, 4, 1, 2, 3),
scale_factor=scale_factor,
mode='trilinear')
pos_embed = pos_embed.permute(0, 2, 3, 4, 1).view(1, -1, dim)
return pos_embed
else:
            # If pos_embed is already the correct size, just return
_, _, H, W = x.shape
if H == self.input_size and W == self.input_size:
return pos_embed
# Compute scale factor for spatial interpolation
npatch = (H // self.patch_size) * (W // self.patch_size)
scale_factor = math.sqrt(npatch / N)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=scale_factor,
mode='bicubic')
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embed
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model | null |
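A minimal forward-pass sketch for the video variant of `vit_tiny`: with tubelet_size=2 and patch_size=16, a 16-frame 224x224 clip yields (16 / 2) * (224 / 16)**2 = 1568 tokens of width 192:

import torch

model = vit_tiny(patch_size=16, img_size=224, num_frames=16, tubelet_size=2)
clip = torch.rand(2, 3, 16, 224, 224)  # B x C x T x H x W
tokens = model(clip)
assert tokens.shape == (2, 1568, 192)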
161,965 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.num_heads = num_heads
self.out_layers = out_layers
self.input_size = img_size
self.patch_size = patch_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.is_video = num_frames > 1
grid_size = self.input_size // self.patch_size
grid_depth = self.num_frames // self.tubelet_size
# Tokenize pixels with convolution
if self.is_video:
self.patch_embed = PatchEmbed3D(
patch_size=patch_size,
tubelet_size=tubelet_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(num_frames // tubelet_size)
* (img_size // patch_size)
* (img_size // patch_size)
)
else:
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(img_size // patch_size)
* (img_size // patch_size)
)
# Position embedding
self.uniform_power = uniform_power
self.pos_embed = None
self.pos_embed = nn.Parameter(
torch.zeros(1, self.num_patches, embed_dim),
requires_grad=False)
# Attention Blocks
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
act_layer=nn.GELU,
grid_size=grid_size,
grid_depth=grid_depth,
attn_drop=attn_drop_rate,
norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# ------ initialize weights
if self.pos_embed is not None:
self._init_pos_embed(self.pos_embed.data) # sincos pos-embed
self.init_std = init_std
self.apply(self._init_weights)
self._rescale_blocks()
def _init_pos_embed(self, pos_embed):
embed_dim = pos_embed.size(-1)
grid_size = self.input_size // self.patch_size
if self.is_video:
grid_depth = self.num_frames // self.tubelet_size
sincos = get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=self.uniform_power
)
else:
sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)
pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv3d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _rescale_blocks(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def get_num_layers(self):
return len(self.blocks)
def no_weight_decay(self):
return {}
def forward(self, x, masks=None):
"""
:param x: input image/video
:param masks: indices of patch tokens to mask (remove)
"""
if masks is not None and not isinstance(masks, list):
masks = [masks]
# Tokenize input
pos_embed = self.pos_embed
if pos_embed is not None:
pos_embed = self.interpolate_pos_encoding(x, pos_embed)
x = self.patch_embed(x)
if pos_embed is not None:
x += pos_embed
B, N, D = x.shape
# Mask away unwanted tokens (if masks provided)
if masks is not None:
x = apply_masks(x, masks)
masks = torch.cat(masks, dim=0)
# Fwd prop
outs = []
for i, blk in enumerate(self.blocks):
x = blk(x, mask=masks)
if self.out_layers is not None and i in self.out_layers:
outs.append(self.norm(x))
if self.out_layers is not None:
return outs
if self.norm is not None:
x = self.norm(x)
return x
def interpolate_pos_encoding(self, x, pos_embed):
_, N, dim = pos_embed.shape
if self.is_video:
            # If pos_embed is already the correct size, just return
_, _, T, H, W = x.shape
if H == self.input_size and W == self.input_size and T == self.num_frames:
return pos_embed
# Convert depth, height, width of input to be measured in patches
# instead of pixels/frames
T = T // self.tubelet_size
H = H // self.patch_size
W = W // self.patch_size
# Compute the initialized shape of the positional embedding measured
# in patches
N_t = self.num_frames // self.tubelet_size
N_h = N_w = self.input_size // self.patch_size
assert N_h * N_w * N_t == N, 'Positional embedding initialized incorrectly'
# Compute scale factor for spatio-temporal interpolation
scale_factor = (T/N_t, H/N_h, W/N_w)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, N_t, N_h, N_w, dim).permute(0, 4, 1, 2, 3),
scale_factor=scale_factor,
mode='trilinear')
pos_embed = pos_embed.permute(0, 2, 3, 4, 1).view(1, -1, dim)
return pos_embed
else:
            # If pos_embed is already the correct size, just return
_, _, H, W = x.shape
if H == self.input_size and W == self.input_size:
return pos_embed
# Compute scale factor for spatial interpolation
npatch = (H // self.patch_size) * (W // self.patch_size)
scale_factor = math.sqrt(npatch / N)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=scale_factor,
mode='bicubic')
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embed
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model | null |
161,966 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
):
def _init_pos_embed(self, pos_embed):
def _init_weights(self, m):
def _rescale_blocks(self):
def rescale(param, layer_id):
def get_num_layers(self):
def no_weight_decay(self):
def forward(self, x, masks=None):
def interpolate_pos_encoding(self, x, pos_embed):
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model | null |
161,967 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.num_heads = num_heads
self.out_layers = out_layers
self.input_size = img_size
self.patch_size = patch_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.is_video = num_frames > 1
grid_size = self.input_size // self.patch_size
grid_depth = self.num_frames // self.tubelet_size
# Tokenize pixels with convolution
if self.is_video:
self.patch_embed = PatchEmbed3D(
patch_size=patch_size,
tubelet_size=tubelet_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(num_frames // tubelet_size)
* (img_size // patch_size)
* (img_size // patch_size)
)
else:
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(img_size // patch_size)
* (img_size // patch_size)
)
# Position embedding
self.uniform_power = uniform_power
self.pos_embed = None
self.pos_embed = nn.Parameter(
torch.zeros(1, self.num_patches, embed_dim),
requires_grad=False)
# Attention Blocks
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
act_layer=nn.GELU,
grid_size=grid_size,
grid_depth=grid_depth,
attn_drop=attn_drop_rate,
norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# ------ initialize weights
if self.pos_embed is not None:
self._init_pos_embed(self.pos_embed.data) # sincos pos-embed
self.init_std = init_std
self.apply(self._init_weights)
self._rescale_blocks()
def _init_pos_embed(self, pos_embed):
embed_dim = pos_embed.size(-1)
grid_size = self.input_size // self.patch_size
if self.is_video:
grid_depth = self.num_frames // self.tubelet_size
sincos = get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=self.uniform_power
)
else:
sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)
pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv3d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _rescale_blocks(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def get_num_layers(self):
return len(self.blocks)
def no_weight_decay(self):
return {}
def forward(self, x, masks=None):
"""
:param x: input image/video
:param masks: indices of patch tokens to mask (remove)
"""
if masks is not None and not isinstance(masks, list):
masks = [masks]
# Tokenize input
pos_embed = self.pos_embed
if pos_embed is not None:
pos_embed = self.interpolate_pos_encoding(x, pos_embed)
x = self.patch_embed(x)
if pos_embed is not None:
x += pos_embed
B, N, D = x.shape
# Mask away unwanted tokens (if masks provided)
if masks is not None:
x = apply_masks(x, masks)
masks = torch.cat(masks, dim=0)
# Fwd prop
outs = []
for i, blk in enumerate(self.blocks):
x = blk(x, mask=masks)
if self.out_layers is not None and i in self.out_layers:
outs.append(self.norm(x))
if self.out_layers is not None:
return outs
if self.norm is not None:
x = self.norm(x)
return x
def interpolate_pos_encoding(self, x, pos_embed):
_, N, dim = pos_embed.shape
if self.is_video:
            # If pos_embed is already the correct size, just return
_, _, T, H, W = x.shape
if H == self.input_size and W == self.input_size and T == self.num_frames:
return pos_embed
# Convert depth, height, width of input to be measured in patches
# instead of pixels/frames
T = T // self.tubelet_size
H = H // self.patch_size
W = W // self.patch_size
# Compute the initialized shape of the positional embedding measured
# in patches
N_t = self.num_frames // self.tubelet_size
N_h = N_w = self.input_size // self.patch_size
assert N_h * N_w * N_t == N, 'Positional embedding initialized incorrectly'
# Compute scale factor for spatio-temporal interpolation
scale_factor = (T/N_t, H/N_h, W/N_w)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, N_t, N_h, N_w, dim).permute(0, 4, 1, 2, 3),
scale_factor=scale_factor,
mode='trilinear')
pos_embed = pos_embed.permute(0, 2, 3, 4, 1).view(1, -1, dim)
return pos_embed
else:
            # If pos_embed is already the correct size, just return
_, _, H, W = x.shape
if H == self.input_size and W == self.input_size:
return pos_embed
# Compute scale factor for spatial interpolation
npatch = (H // self.patch_size) * (W // self.patch_size)
scale_factor = math.sqrt(npatch / N)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=scale_factor,
mode='bicubic')
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embed
def vit_large(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model | null |
161,968 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.num_heads = num_heads
self.out_layers = out_layers
self.input_size = img_size
self.patch_size = patch_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.is_video = num_frames > 1
grid_size = self.input_size // self.patch_size
grid_depth = self.num_frames // self.tubelet_size
# Tokenize pixels with convolution
if self.is_video:
self.patch_embed = PatchEmbed3D(
patch_size=patch_size,
tubelet_size=tubelet_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(num_frames // tubelet_size)
* (img_size // patch_size)
* (img_size // patch_size)
)
else:
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(img_size // patch_size)
* (img_size // patch_size)
)
# Position embedding
self.uniform_power = uniform_power
self.pos_embed = None
self.pos_embed = nn.Parameter(
torch.zeros(1, self.num_patches, embed_dim),
requires_grad=False)
# Attention Blocks
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
act_layer=nn.GELU,
grid_size=grid_size,
grid_depth=grid_depth,
attn_drop=attn_drop_rate,
norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# ------ initialize weights
if self.pos_embed is not None:
self._init_pos_embed(self.pos_embed.data) # sincos pos-embed
self.init_std = init_std
self.apply(self._init_weights)
self._rescale_blocks()
def _init_pos_embed(self, pos_embed):
embed_dim = pos_embed.size(-1)
grid_size = self.input_size // self.patch_size
if self.is_video:
grid_depth = self.num_frames // self.tubelet_size
sincos = get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=self.uniform_power
)
else:
sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)
pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv3d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _rescale_blocks(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def get_num_layers(self):
return len(self.blocks)
def no_weight_decay(self):
return {}
def forward(self, x, masks=None):
"""
:param x: input image/video
:param masks: indices of patch tokens to mask (remove)
"""
if masks is not None and not isinstance(masks, list):
masks = [masks]
# Tokenize input
pos_embed = self.pos_embed
if pos_embed is not None:
pos_embed = self.interpolate_pos_encoding(x, pos_embed)
x = self.patch_embed(x)
if pos_embed is not None:
x += pos_embed
B, N, D = x.shape
# Mask away unwanted tokens (if masks provided)
if masks is not None:
x = apply_masks(x, masks)
masks = torch.cat(masks, dim=0)
# Fwd prop
outs = []
for i, blk in enumerate(self.blocks):
x = blk(x, mask=masks)
if self.out_layers is not None and i in self.out_layers:
outs.append(self.norm(x))
if self.out_layers is not None:
return outs
if self.norm is not None:
x = self.norm(x)
return x
def interpolate_pos_encoding(self, x, pos_embed):
_, N, dim = pos_embed.shape
if self.is_video:
            # If pos_embed is already the correct size, just return
_, _, T, H, W = x.shape
if H == self.input_size and W == self.input_size and T == self.num_frames:
return pos_embed
# Convert depth, height, width of input to be measured in patches
# instead of pixels/frames
T = T // self.tubelet_size
H = H // self.patch_size
W = W // self.patch_size
# Compute the initialized shape of the positional embedding measured
# in patches
N_t = self.num_frames // self.tubelet_size
N_h = N_w = self.input_size // self.patch_size
assert N_h * N_w * N_t == N, 'Positional embedding initialized incorrectly'
# Compute scale factor for spatio-temporal interpolation
scale_factor = (T/N_t, H/N_h, W/N_w)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, N_t, N_h, N_w, dim).permute(0, 4, 1, 2, 3),
scale_factor=scale_factor,
mode='trilinear')
            pos_embed = pos_embed.permute(0, 2, 3, 4, 1).reshape(1, -1, dim)
return pos_embed
else:
            # If pos_embed is already the correct size, just return
_, _, H, W = x.shape
if H == self.input_size and W == self.input_size:
return pos_embed
# Compute scale factor for spatial interpolation
npatch = (H // self.patch_size) * (W // self.patch_size)
scale_factor = math.sqrt(npatch / N)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=scale_factor,
mode='bicubic')
            pos_embed = pos_embed.permute(0, 2, 3, 1).reshape(1, -1, dim)
return pos_embed
def vit_huge(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model | null |
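A usage sketch for the factory above, assuming the repository's src package is importable so the Block and patch-embed modules resolve; the shapes follow from the constructor arguments:

import torch

encoder = vit_huge(img_size=224, patch_size=16, num_frames=16, tubelet_size=2)
clip = torch.randn(2, 3, 16, 224, 224)  # [B, C, T, H, W]
tokens = encoder(clip)                   # no masks: every patch token is kept
print(tokens.shape)                      # torch.Size([2, 1568, 1280]); 1568 = 8*14*14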
161,969 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
    ):
        ...
    def _init_pos_embed(self, pos_embed): ...
    def _init_weights(self, m): ...
    def _rescale_blocks(self):
        def rescale(param, layer_id): ...
    def get_num_layers(self): ...
    def no_weight_decay(self): ...
    def forward(self, x, masks=None): ...
    def interpolate_pos_encoding(self, x, pos_embed): ...
def vit_giant(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48/11,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model | null |
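A back-of-envelope check (not from the source) that the hyperparameters above land near the usual ~1B parameters of a ViT-giant trunk, counting only the attention and MLP weight matrices:

embed_dim, depth, mlp_ratio = 1408, 40, 48 / 11
attn = 4 * embed_dim ** 2                          # qkv (3*d^2) + output proj (d^2)
mlp = 2 * embed_dim * int(embed_dim * mlp_ratio)   # fc1 + fc2, hidden dim 6144
print(f"{depth * (attn + mlp) / 1e9:.2f}B")        # ~1.01B parameters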
161,970 | import math
from functools import partial
import torch
import torch.nn as nn
from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D
from src.models.utils.modules import Block
from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed
from src.utils.tensors import trunc_normal_
from src.masks.utils import apply_masks
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(
self,
img_size=224,
patch_size=16,
num_frames=1,
tubelet_size=2,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
init_std=0.02,
out_layers=None,
uniform_power=False,
**kwargs
):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.num_heads = num_heads
self.out_layers = out_layers
self.input_size = img_size
self.patch_size = patch_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.is_video = num_frames > 1
grid_size = self.input_size // self.patch_size
grid_depth = self.num_frames // self.tubelet_size
# Tokenize pixels with convolution
if self.is_video:
self.patch_embed = PatchEmbed3D(
patch_size=patch_size,
tubelet_size=tubelet_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(num_frames // tubelet_size)
* (img_size // patch_size)
* (img_size // patch_size)
)
else:
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim)
self.num_patches = (
(img_size // patch_size)
* (img_size // patch_size)
)
# Position embedding
self.uniform_power = uniform_power
self.pos_embed = None
self.pos_embed = nn.Parameter(
torch.zeros(1, self.num_patches, embed_dim),
requires_grad=False)
# Attention Blocks
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
act_layer=nn.GELU,
grid_size=grid_size,
grid_depth=grid_depth,
attn_drop=attn_drop_rate,
norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# ------ initialize weights
if self.pos_embed is not None:
self._init_pos_embed(self.pos_embed.data) # sincos pos-embed
self.init_std = init_std
self.apply(self._init_weights)
self._rescale_blocks()
def _init_pos_embed(self, pos_embed):
embed_dim = pos_embed.size(-1)
grid_size = self.input_size // self.patch_size
if self.is_video:
grid_depth = self.num_frames // self.tubelet_size
sincos = get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=self.uniform_power
)
else:
sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)
pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv3d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _rescale_blocks(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def get_num_layers(self):
return len(self.blocks)
def no_weight_decay(self):
return {}
def forward(self, x, masks=None):
"""
:param x: input image/video
:param masks: indices of patch tokens to mask (remove)
"""
if masks is not None and not isinstance(masks, list):
masks = [masks]
# Tokenize input
pos_embed = self.pos_embed
if pos_embed is not None:
pos_embed = self.interpolate_pos_encoding(x, pos_embed)
x = self.patch_embed(x)
if pos_embed is not None:
x += pos_embed
B, N, D = x.shape
# Mask away unwanted tokens (if masks provided)
if masks is not None:
x = apply_masks(x, masks)
masks = torch.cat(masks, dim=0)
# Fwd prop
outs = []
for i, blk in enumerate(self.blocks):
x = blk(x, mask=masks)
if self.out_layers is not None and i in self.out_layers:
outs.append(self.norm(x))
if self.out_layers is not None:
return outs
if self.norm is not None:
x = self.norm(x)
return x
def interpolate_pos_encoding(self, x, pos_embed):
_, N, dim = pos_embed.shape
if self.is_video:
            # If pos_embed is already the correct size, just return
_, _, T, H, W = x.shape
if H == self.input_size and W == self.input_size and T == self.num_frames:
return pos_embed
# Convert depth, height, width of input to be measured in patches
# instead of pixels/frames
T = T // self.tubelet_size
H = H // self.patch_size
W = W // self.patch_size
# Compute the initialized shape of the positional embedding measured
# in patches
N_t = self.num_frames // self.tubelet_size
N_h = N_w = self.input_size // self.patch_size
assert N_h * N_w * N_t == N, 'Positional embedding initialized incorrectly'
# Compute scale factor for spatio-temporal interpolation
scale_factor = (T/N_t, H/N_h, W/N_w)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, N_t, N_h, N_w, dim).permute(0, 4, 1, 2, 3),
scale_factor=scale_factor,
mode='trilinear')
            pos_embed = pos_embed.permute(0, 2, 3, 4, 1).reshape(1, -1, dim)
return pos_embed
else:
            # If pos_embed is already the correct size, just return
_, _, H, W = x.shape
if H == self.input_size and W == self.input_size:
return pos_embed
# Compute scale factor for spatial interpolation
npatch = (H // self.patch_size) * (W // self.patch_size)
scale_factor = math.sqrt(npatch / N)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=scale_factor,
mode='bicubic')
            pos_embed = pos_embed.permute(0, 2, 3, 1).reshape(1, -1, dim)
return pos_embed
def vit_gigantic(patch_size=14, **kwargs):
model = VisionTransformer(
        patch_size=patch_size, embed_dim=1664, depth=48, num_heads=16, mlp_ratio=64/13,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs
)
return model | null |
161,971 | import numpy as np
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
returns: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=float)
omega /= embed_dim / 2.
omega = 1. / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
The provided code snippet includes necessary dependencies for implementing the `get_3d_sincos_pos_embed` function. Write a Python function `def get_3d_sincos_pos_embed( embed_dim, grid_size, grid_depth, cls_token=False, uniform_power=False )` to solve the following problem:
grid_size: int of the grid height and width grid_depth: int of the grid depth returns: pos_embed: [grid_depth*grid_size*grid_size, embed_dim] (w/o cls_token) or [1+grid_depth*grid_size*grid_size, embed_dim] (w/ cls_token)
Here is the function:
def get_3d_sincos_pos_embed(
embed_dim,
grid_size,
grid_depth,
cls_token=False,
uniform_power=False
):
"""
grid_size: int of the grid height and width
grid_depth: int of the grid depth
returns:
pos_embed: [grid_depth*grid_size*grid_size, embed_dim] (w/o cls_token)
or [1+grid_depth*grid_size*grid_size, embed_dim] (w/ cls_token)
"""
grid_d = np.arange(grid_depth, dtype=float)
grid_h = np.arange(grid_size, dtype=float)
grid_w = np.arange(grid_size, dtype=float)
grid_h, grid_d, grid_w = np.meshgrid(grid_h, grid_d, grid_w) # order of meshgrid is very important for indexing as [d,h,w]
if not uniform_power:
h_embed_dim = embed_dim // 4
w_embed_dim = embed_dim // 4
d_embed_dim = embed_dim // 2
else:
h_embed_dim = w_embed_dim = d_embed_dim = int(np.ceil(embed_dim/6)*2)
emb_h = get_1d_sincos_pos_embed_from_grid(h_embed_dim, grid_h) # (T*H*W, D1)
emb_w = get_1d_sincos_pos_embed_from_grid(w_embed_dim, grid_w) # (T*H*W, D2)
emb_d = get_1d_sincos_pos_embed_from_grid(d_embed_dim, grid_d) # (T*H*W, D3)
pos_embed = np.concatenate([emb_d, emb_h, emb_w], axis=1)
pos_embed = pos_embed[:, :embed_dim]
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed | grid_size: int of the grid height and width grid_depth: int of the grid depth returns: pos_embed: [grid_depth*grid_size*grid_size, embed_dim] (w/o cls_token) or [1+grid_depth*grid_size*grid_size, embed_dim] (w/ cls_token) |
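A quick shape check for the function above; the grid sizes here match an 8x14x14 video token grid:

pe = get_3d_sincos_pos_embed(embed_dim=768, grid_size=14, grid_depth=8)
print(pe.shape)                    # (1568, 768), one row per (t, h, w) token
print(pe.shape[0] == 8 * 14 * 14)  # True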
161,972 | import numpy as np
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
returns: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=float)
omega /= embed_dim / 2.
omega = 1. / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
The provided code snippet includes necessary dependencies for implementing the `get_2d_sincos_pos_embed` function. Write a Python function `def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)` to solve the following problem:
grid_size: int of the grid height and width returns: pos_embed: [grid_size*grid_size, embed_dim] (w/o cls_token) or [1+grid_size*grid_size, embed_dim] (w/ cls_token)
Here is the function:
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
grid_size: int of the grid height and width
returns:
pos_embed: [grid_size*grid_size, embed_dim] (w/o cls_token)
or [1+grid_size*grid_size, embed_dim] (w/ cls_token)
"""
grid_h = np.arange(grid_size, dtype=float)
grid_w = np.arange(grid_size, dtype=float)
grid_w, grid_h = np.meshgrid(grid_w, grid_h) # order of meshgrid is very important for indexing as [h, w]
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid_h) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid_w) # (H*W, D/2)
pos_embed = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed | grid_size: int of the grid height and width returns: pos_embed: [grid_size*grid_size, embed_dim] (w/o cls_token) or [1+grid_size*grid_size, embed_dim] (w/ cls_token) |
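The cls_token branch simply prepends a row of zeros; a quick sketch:

pe = get_2d_sincos_pos_embed(embed_dim=384, grid_size=14, cls_token=True)
print(pe.shape)     # (197, 384): one [CLS] row plus 14*14 patch rows
print(pe[0].any())  # False -- the prepended cls row is all zeros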
161,973 | import numpy as np
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
returns: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=float)
omega /= embed_dim / 2.
omega = 1. / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
The provided code snippet includes necessary dependencies for implementing the `get_1d_sincos_pos_embed` function. Write a Python function `def get_1d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)` to solve the following problem:
embed_dim: output dimension for each position grid_size: int of the grid length returns: pos_embed: [grid_size, embed_dim] (w/o cls_token) or [1+grid_size, embed_dim] (w/ cls_token)
Here is the function:
def get_1d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
embed_dim: output dimension for each position
grid_size: int of the grid length
returns:
pos_embed: [grid_size, embed_dim] (w/o cls_token)
or [1+grid_size, embed_dim] (w/ cls_token)
"""
grid = np.arange(grid_size, dtype=float)
pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed | embed_dim: output dimension for each position grid_size: int of the grid length returns: pos_embed: [grid_size, embed_dim] (w/o cls_token) or [1+grid_size, embed_dim] (w/ cls_token) |
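A tiny worked example: with embed_dim=4, omega = [1, 1/100], so row p is [sin(p), sin(p/100), cos(p), cos(p/100)]:

import numpy as np

pe = get_1d_sincos_pos_embed(embed_dim=4, grid_size=3)
print(np.round(pe, 3))
# [[ 0.     0.     1.     1.   ]
#  [ 0.841  0.01   0.54   1.   ]
#  [ 0.909  0.02  -0.416  1.   ]]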
161,974 | import logging
import sys
import torch
The provided code snippet includes necessary dependencies for implementing the `gpu_timer` function. Write a Python function `def gpu_timer(closure, log_timings=True)` to solve the following problem:
Helper to time gpu-time to execute closure()
Here is the function:
def gpu_timer(closure, log_timings=True):
""" Helper to time gpu-time to execute closure() """
log_timings = log_timings and torch.cuda.is_available()
elapsed_time = -1.
if log_timings:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
result = closure()
if log_timings:
end.record()
torch.cuda.synchronize()
elapsed_time = start.elapsed_time(end)
return result, elapsed_time | Helper to time gpu-time to execute closure() |
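Usage sketch: the closure is any zero-argument callable; on a CPU-only machine the timing path is skipped and -1.0 is returned:

import torch

x = torch.randn(512, 512, device='cuda' if torch.cuda.is_available() else 'cpu')
result, ms = gpu_timer(lambda: x @ x)
print(ms)  # elapsed milliseconds between the CUDA events, or -1.0 without a GPU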
161,975 | import logging
import sys
import torch
class AverageMeter(object):
    def __init__(self): ...
    def reset(self): ...
    def update(self, val, n=1): ...
def grad_logger(named_params):
stats = AverageMeter()
stats.first_layer = None
stats.last_layer = None
for n, p in named_params:
if (p.grad is not None) and not (n.endswith('.bias') or len(p.shape) == 1):
grad_norm = float(torch.norm(p.grad.data))
stats.update(grad_norm)
if 'qkv' in n:
stats.last_layer = grad_norm
if stats.first_layer is None:
stats.first_layer = grad_norm
if stats.first_layer is None or stats.last_layer is None:
stats.first_layer = stats.last_layer = 0.
return stats | null |
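A usage sketch with a toy probe (assuming the full AverageMeter from the neighboring rows); note that first_layer/last_layer track 'qkv' parameters and are both reset to 0 when none are present, as here:

import torch
import torch.nn as nn

model = nn.Linear(8, 2)                    # hypothetical stand-in module
model(torch.randn(4, 8)).sum().backward()  # populate .grad
stats = grad_logger(model.named_parameters())
print(f"avg grad-norm: {stats.avg:.4f}, qkv first/last: {stats.first_layer}/{stats.last_layer}")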
161,976 | import logging
import sys
import torch
class AverageMeter(object):
"""computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.max = float('-inf')
self.min = float('inf')
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
try:
self.max = max(val, self.max)
self.min = min(val, self.min)
except Exception:
pass
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
The provided code snippet includes necessary dependencies for implementing the `adamw_logger` function. Write a Python function `def adamw_logger(optimizer)` to solve the following problem:
logging magnitude of first and second momentum buffers in adamw
Here is the function:
def adamw_logger(optimizer):
""" logging magnitude of first and second momentum buffers in adamw """
# TODO: assert that optimizer is instance of torch.optim.AdamW
state = optimizer.state_dict().get('state')
exp_avg_stats = AverageMeter()
exp_avg_sq_stats = AverageMeter()
for key in state:
s = state.get(key)
exp_avg_stats.update(float(s.get('exp_avg').abs().mean()))
exp_avg_sq_stats.update(float(s.get('exp_avg_sq').abs().mean()))
return {'exp_avg': exp_avg_stats, 'exp_avg_sq': exp_avg_sq_stats} | logging magnitude of first and second momentum buffers in adamw |
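Usage sketch: the optimizer state must be populated by at least one step before the momentum buffers exist:

import torch

p = torch.nn.Parameter(torch.randn(4))
opt = torch.optim.AdamW([p], lr=1e-3)
p.sum().backward()
opt.step()
stats = adamw_logger(opt)
print(stats['exp_avg'].avg, stats['exp_avg_sq'].avg)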
161,977 | import math
import torch
from logging import getLogger
The provided code snippet includes necessary dependencies for implementing the `apply_masks` function. Write a Python function `def apply_masks(x, masks)` to solve the following problem:
:param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)] :param masks: list of tensors containing indices of patches [0,N) to keep
Here is the function:
def apply_masks(x, masks):
"""
:param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
:param masks: list of tensors containing indices of patches [0,N) to keep
"""
all_x = []
for m in masks:
mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
all_x += [torch.gather(x, dim=1, index=mask_keep)]
return torch.cat(all_x, dim=0) | :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)] :param masks: list of tensors containing indices of patches [0,N) to keep |
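A shape sketch: each mask gathers its own copy of the batch, so K masks multiply the effective batch size by K:

import torch

x = torch.randn(2, 10, 16)                                # [B, N, D]
masks = [torch.randint(0, 10, (2, 4)) for _ in range(2)]  # keep 4 of 10 tokens
print(apply_masks(x, masks).shape)                        # torch.Size([4, 4, 16])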
161,979 | import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import src.datasets.utils.video.transforms as video_transforms
import src.datasets.utils.video.volume_transforms as volume_transforms
from src.datasets.utils.video.randerase import RandomErasing
from src.models.utils.pos_embs import get_1d_sincos_pos_embed
from src.masks.utils import apply_masks
The provided code snippet includes necessary dependencies for implementing the `tensor_normalize` function. Write a Python function `def tensor_normalize(tensor, mean, std)` to solve the following problem:
Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide.
Here is the function:
def tensor_normalize(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor - mean
tensor = tensor / std
return tensor | Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide. |
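A sketch with channels-last uint8 frames, where the mean/std lists broadcast over the final (channel) dimension:

import torch

frames = torch.randint(0, 256, (8, 224, 224, 3), dtype=torch.uint8)
out = tensor_normalize(frames, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
print(out.dtype, out.shape)  # torch.float32 torch.Size([8, 224, 224, 3])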
161,980 | import os
try:
# -- WARNING: IF DOING DISTRIBUTED TRAINING ON A NON-SLURM CLUSTER, MAKE
# -- SURE TO UPDATE THIS TO GET LOCAL-RANK ON NODE, OR ENSURE
# -- THAT YOUR JOBS ARE LAUNCHED WITH ONLY 1 DEVICE VISIBLE
# -- TO EACH PROCESS
os.environ['CUDA_VISIBLE_DEVICES'] = os.environ['SLURM_LOCALID']
except Exception:
pass
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
from evals.video_classification_frozen.utils import (
make_transforms,
ClipAggregation,
FrameAggregation
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_GLOBAL_SEED = 0
torch.manual_seed(_GLOBAL_SEED)
torch.backends.cudnn.benchmark = True
class AllReduce(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
if (
dist.is_available()
and dist.is_initialized()
and (dist.get_world_size() > 1)
):
x = x.contiguous() / dist.get_world_size()
dist.all_reduce(x)
return x
    @staticmethod
    def backward(ctx, grads):
return grads
class AverageMeter(object):
"""computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.max = float('-inf')
self.min = float('inf')
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
try:
self.max = max(val, self.max)
self.min = min(val, self.min)
except Exception:
pass
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def run_one_epoch(
device,
training,
encoder,
classifier,
scaler,
optimizer,
scheduler,
wd_scheduler,
data_loader,
use_bfloat16,
num_spatial_views,
num_temporal_views,
attend_across_segments,
):
classifier.train(mode=training)
criterion = torch.nn.CrossEntropyLoss()
top1_meter = AverageMeter()
for itr, data in enumerate(data_loader):
if training:
scheduler.step()
wd_scheduler.step()
with torch.cuda.amp.autocast(dtype=torch.float16, enabled=use_bfloat16):
# Load data and put on GPU
clips = [
[dij.to(device, non_blocking=True) for dij in di] # iterate over spatial views of clip
for di in data[0] # iterate over temporal index of clip
]
clip_indices = [d.to(device, non_blocking=True) for d in data[2]]
labels = data[1].to(device)
batch_size = len(labels)
# Forward and prediction
with torch.no_grad():
outputs = encoder(clips, clip_indices)
if not training:
if attend_across_segments:
outputs = [classifier(o) for o in outputs]
else:
outputs = [[classifier(ost) for ost in os] for os in outputs]
if training:
if attend_across_segments:
outputs = [classifier(o) for o in outputs]
else:
outputs = [[classifier(ost) for ost in os] for os in outputs]
# Compute loss
if attend_across_segments:
loss = sum([criterion(o, labels) for o in outputs]) / len(outputs)
else:
loss = sum([sum([criterion(ost, labels) for ost in os]) for os in outputs]) / len(outputs) / len(outputs[0])
with torch.no_grad():
if attend_across_segments:
outputs = sum([F.softmax(o, dim=1) for o in outputs]) / len(outputs)
else:
outputs = sum([sum([F.softmax(ost, dim=1) for ost in os]) for os in outputs]) / len(outputs) / len(outputs[0])
top1_acc = 100. * outputs.max(dim=1).indices.eq(labels).sum() / batch_size
top1_acc = float(AllReduce.apply(top1_acc))
top1_meter.update(top1_acc)
if training:
if use_bfloat16:
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(classifier.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(classifier.parameters(), 1.0)
optimizer.step()
optimizer.zero_grad()
if itr % 20 == 0:
logger.info('[%5d] %.3f%% (loss: %.3f) [mem: %.2e]'
% (itr, top1_meter.avg, loss,
torch.cuda.max_memory_allocated() / 1024.**2))
return top1_meter.avg | null |
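The multi-view voting in the eval branch above reduces to averaging softmax probabilities before taking top-1; a standalone sketch:

import torch
import torch.nn.functional as F

views = [torch.randn(4, 10) for _ in range(3)]  # 3 views, batch 4, 10 classes
labels = torch.randint(0, 10, (4,))
probs = sum(F.softmax(v, dim=1) for v in views) / len(views)
top1 = 100. * probs.max(dim=1).indices.eq(labels).sum() / len(labels)
print(float(top1))  # percentage of the batch predicted correctly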
161,981 | import os
import logging
import math
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
from evals.video_classification_frozen.utils import (
make_transforms,
ClipAggregation,
FrameAggregation
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_GLOBAL_SEED = 0
torch.manual_seed(_GLOBAL_SEED)
torch.backends.cudnn.benchmark = True
def load_checkpoint(
device,
r_path,
classifier,
opt,
scaler
):
try:
checkpoint = torch.load(r_path, map_location=torch.device('cpu'))
epoch = checkpoint['epoch']
# -- loading encoder
pretrained_dict = checkpoint['classifier']
msg = classifier.load_state_dict(pretrained_dict)
logger.info(f'loaded pretrained classifier from epoch {epoch} with msg: {msg}')
# -- loading optimizer
opt.load_state_dict(checkpoint['opt'])
if scaler is not None:
scaler.load_state_dict(checkpoint['scaler'])
logger.info(f'loaded optimizers from epoch {epoch}')
logger.info(f'read-path: {r_path}')
del checkpoint
except Exception as e:
logger.info(f'Encountered exception when loading checkpoint {e}')
epoch = 0
return classifier, opt, scaler, epoch | null |
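A round-trip sketch of the checkpoint layout this loader expects; the classifier, optimizer, and path here are stand-ins, not from the source:

import torch
import torch.nn as nn

classifier = nn.Linear(768, 400)                  # hypothetical probe
opt = torch.optim.AdamW(classifier.parameters())
torch.save({'epoch': 5, 'classifier': classifier.state_dict(),
            'opt': opt.state_dict(), 'scaler': None}, '/tmp/latest.pth.tar')
classifier, opt, _, epoch = load_checkpoint(
    'cpu', '/tmp/latest.pth.tar', classifier, opt, scaler=None)
print(epoch)  # 5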
161,982 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
from evals.video_classification_frozen.utils import (
make_transforms,
ClipAggregation,
FrameAggregation
)
def init_data(
batch_size,
transform=None,
shared_transform=None,
data='ImageNet',
collator=None,
pin_mem=True,
num_workers=8,
world_size=1,
rank=0,
root_path=None,
image_folder=None,
training=True,
copy_data=False,
drop_last=True,
tokenize_txt=True,
subset_file=None,
clip_len=8,
frame_sample_rate=2,
duration=None,
num_clips=1,
random_clip_sampling=True,
allow_clip_overlap=False,
filter_short_videos=False,
filter_long_videos=int(1e9),
decode_one_clip=True,
datasets_weights=None,
persistent_workers=False,
repeat_wds=False,
ipe=300,
log_dir=None,
):
if (data.lower() == 'imagenet') \
or (data.lower() == 'inat21') \
or (data.lower() == 'places205'):
from src.datasets.image_dataset import make_imagedataset
dataset, data_loader, dist_sampler = make_imagedataset(
transform=transform,
batch_size=batch_size,
collator=collator,
pin_mem=pin_mem,
training=training,
num_workers=num_workers,
world_size=world_size,
rank=rank,
root_path=root_path,
image_folder=image_folder,
persistent_workers=persistent_workers,
copy_data=copy_data,
drop_last=drop_last,
subset_file=subset_file)
elif data.lower() == 'videodataset':
from src.datasets.video_dataset import make_videodataset
dataset, data_loader, dist_sampler = make_videodataset(
data_paths=root_path,
batch_size=batch_size,
frames_per_clip=clip_len,
frame_step=frame_sample_rate,
duration=duration,
num_clips=num_clips,
random_clip_sampling=random_clip_sampling,
allow_clip_overlap=allow_clip_overlap,
filter_short_videos=filter_short_videos,
filter_long_videos=filter_long_videos,
shared_transform=shared_transform,
transform=transform,
datasets_weights=datasets_weights,
collator=collator,
num_workers=num_workers,
world_size=world_size,
rank=rank,
drop_last=drop_last,
log_dir=log_dir)
return (data_loader, dist_sampler)
def make_transforms(
training=True,
random_horizontal_flip=True,
random_resize_aspect_ratio=(3/4, 4/3),
random_resize_scale=(0.3, 1.0),
reprob=0.0,
auto_augment=False,
motion_shift=False,
crop_size=224,
num_views_per_clip=1,
normalize=((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))
):
if not training and num_views_per_clip > 1:
print('Making EvalVideoTransform, multi-view')
_frames_augmentation = EvalVideoTransform(
num_views_per_clip=num_views_per_clip,
short_side_size=crop_size,
normalize=normalize,
)
else:
_frames_augmentation = VideoTransform(
training=training,
random_horizontal_flip=random_horizontal_flip,
random_resize_aspect_ratio=random_resize_aspect_ratio,
random_resize_scale=random_resize_scale,
reprob=reprob,
auto_augment=auto_augment,
motion_shift=motion_shift,
crop_size=crop_size,
normalize=normalize,
)
return _frames_augmentation
def make_dataloader(
root_path,
batch_size,
world_size,
rank,
dataset_type='VideoDataset',
resolution=224,
frames_per_clip=16,
frame_step=4,
num_segments=8,
eval_duration=None,
num_views_per_segment=1,
allow_segment_overlap=True,
training=False,
num_workers=12,
subset_file=None
):
# Make Video Transforms
transform = make_transforms(
training=training,
num_views_per_clip=num_views_per_segment,
random_horizontal_flip=False,
random_resize_aspect_ratio=(0.75, 4/3),
random_resize_scale=(0.08, 1.0),
reprob=0.25,
auto_augment=True,
motion_shift=False,
crop_size=resolution,
)
data_loader, _ = init_data(
data=dataset_type,
root_path=root_path,
transform=transform,
batch_size=batch_size,
world_size=world_size,
rank=rank,
clip_len=frames_per_clip,
frame_sample_rate=frame_step,
duration=eval_duration,
num_clips=num_segments,
allow_clip_overlap=allow_segment_overlap,
num_workers=num_workers,
copy_data=False,
drop_last=False,
subset_file=subset_file)
return data_loader | null |
161,983 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
from evals.video_classification_frozen.utils import (
make_transforms,
ClipAggregation,
FrameAggregation
)
def load_pretrained(
encoder,
pretrained,
checkpoint_key='target_encoder'
):
    ...
def init_model(
device,
pretrained,
model_name,
patch_size=16,
crop_size=224,
# Video specific parameters
frames_per_clip=16,
tubelet_size=2,
use_sdpa=False,
use_SiLU=False,
tight_SiLU=True,
uniform_power=False,
checkpoint_key='target_encoder'
):
encoder = vit.__dict__[model_name](
img_size=crop_size,
patch_size=patch_size,
num_frames=frames_per_clip,
tubelet_size=tubelet_size,
uniform_power=uniform_power,
use_sdpa=use_sdpa,
use_SiLU=use_SiLU,
tight_SiLU=tight_SiLU,
)
encoder.to(device)
encoder = load_pretrained(encoder=encoder, pretrained=pretrained, checkpoint_key=checkpoint_key)
return encoder | null |
161,984 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
from evals.video_classification_frozen.utils import (
make_transforms,
ClipAggregation,
FrameAggregation
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_GLOBAL_SEED = 0
torch.manual_seed(_GLOBAL_SEED)
torch.backends.cudnn.benchmark = True
class WarmupCosineSchedule(object):
def __init__(
self,
optimizer,
warmup_steps,
start_lr,
ref_lr,
T_max,
last_epoch=-1,
final_lr=0.
):
self.optimizer = optimizer
self.start_lr = start_lr
self.ref_lr = ref_lr
self.final_lr = final_lr
self.warmup_steps = warmup_steps
self.T_max = T_max - warmup_steps
self._step = 0.
def step(self):
self._step += 1
if self._step < self.warmup_steps:
progress = float(self._step) / float(max(1, self.warmup_steps))
new_lr = self.start_lr + progress * (self.ref_lr - self.start_lr)
else:
# -- progress after warmup
progress = float(self._step - self.warmup_steps) / float(max(1, self.T_max))
new_lr = max(self.final_lr,
self.final_lr + (self.ref_lr - self.final_lr) * 0.5 * (1. + math.cos(math.pi * progress)))
for group in self.optimizer.param_groups:
group['lr'] = new_lr
return new_lr
class CosineWDSchedule(object):
def __init__(
self,
optimizer,
ref_wd,
T_max,
final_wd=0.
):
self.optimizer = optimizer
self.ref_wd = ref_wd
self.final_wd = final_wd
self.T_max = T_max
self._step = 0.
def step(self):
self._step += 1
progress = self._step / self.T_max
new_wd = self.final_wd + (self.ref_wd - self.final_wd) * 0.5 * (1. + math.cos(math.pi * progress))
if self.final_wd <= self.ref_wd:
new_wd = max(self.final_wd, new_wd)
else:
new_wd = min(self.final_wd, new_wd)
for group in self.optimizer.param_groups:
if ('WD_exclude' not in group) or not group['WD_exclude']:
group['weight_decay'] = new_wd
return new_wd
def init_opt(
classifier,
iterations_per_epoch,
start_lr,
ref_lr,
warmup,
num_epochs,
wd=1e-6,
final_wd=1e-6,
final_lr=0.0,
use_bfloat16=False
):
param_groups = [
{
'params': (p for n, p in classifier.named_parameters()
if ('bias' not in n) and (len(p.shape) != 1))
}, {
'params': (p for n, p in classifier.named_parameters()
if ('bias' in n) or (len(p.shape) == 1)),
'WD_exclude': True,
'weight_decay': 0
}
]
logger.info('Using AdamW')
optimizer = torch.optim.AdamW(param_groups)
scheduler = WarmupCosineSchedule(
optimizer,
warmup_steps=int(warmup*iterations_per_epoch),
start_lr=start_lr,
ref_lr=ref_lr,
final_lr=final_lr,
T_max=int(num_epochs*iterations_per_epoch))
wd_scheduler = CosineWDSchedule(
optimizer,
ref_wd=wd,
final_wd=final_wd,
T_max=int(num_epochs*iterations_per_epoch))
scaler = torch.cuda.amp.GradScaler() if use_bfloat16 else None
return optimizer, scaler, scheduler, wd_scheduler | null |
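Tracing the warmup-cosine schedule defined above with a toy optimizer (the step counts here are illustrative):

import torch

opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))])
sched = WarmupCosineSchedule(opt, warmup_steps=10, start_lr=0.0,
                             ref_lr=1e-3, T_max=100)
lrs = [sched.step() for _ in range(100)]
print(f"{lrs[0]:.1e} {lrs[9]:.1e} {lrs[-1]:.1e}")  # 1.0e-04 1.0e-03 0.0e+00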
161,985 | import argparse
import multiprocessing as mp
import pprint
import yaml
from src.utils.distributed import init_distributed
from evals.scaffold import main as eval_main
def init_distributed(port=37123, rank_and_world_size=(None, None)):
if dist.is_available() and dist.is_initialized():
return dist.get_world_size(), dist.get_rank()
rank, world_size = rank_and_world_size
os.environ['MASTER_ADDR'] = 'localhost'
if (rank is None) or (world_size is None):
try:
world_size = int(os.environ['SLURM_NTASKS'])
rank = int(os.environ['SLURM_PROCID'])
os.environ['MASTER_ADDR'] = os.environ['HOSTNAME']
except Exception:
logger.info('SLURM vars not set (distributed training not available)')
world_size, rank = 1, 0
return world_size, rank
try:
os.environ['MASTER_PORT'] = str(port)
torch.distributed.init_process_group(
backend='nccl',
world_size=world_size,
rank=rank
)
except Exception as e:
world_size, rank = 1, 0
logger.info(f'Rank: {rank}. Distributed training not available {e}')
return world_size, rank
def process_main(rank, fname, world_size, devices):
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(devices[rank].split(':')[-1])
import logging
logging.basicConfig()
logger = logging.getLogger()
if rank == 0:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.ERROR)
logger.info(f'called-params {fname}')
# Load config
params = None
with open(fname, 'r') as y_file:
params = yaml.load(y_file, Loader=yaml.FullLoader)
logger.info('loaded params...')
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(params)
# Init distributed (access to comm between GPUS on same machine)
world_size, rank = init_distributed(rank_and_world_size=(rank, world_size))
logger.info(f'Running... (rank: {rank}/{world_size})')
# Launch the eval with loaded config
eval_main(params['eval_name'], args_eval=params) | null |
161,986 | import argparse
import logging
import os
import pprint
import sys
import time
import yaml
import submitit
from evals.scaffold import main as eval_main
logger = logging.getLogger()
def launch_evals_with_parsed_args(
args_for_evals,
submitit_folder,
partition='learnlab,learnfair',
timeout=4300,
nodes=1,
tasks_per_node=1,
delay_seconds=10,
exclude_nodes=None
):
if not isinstance(args_for_evals, list):
logger.info(f'Passed in eval-args of type {type(args_for_evals)}')
args_for_evals = [args_for_evals]
time.sleep(delay_seconds)
logger.info('Launching evaluations in separate jobs...')
executor = submitit.AutoExecutor(
folder=os.path.join(submitit_folder, 'job_%j'),
slurm_max_num_timeout=20)
executor.update_parameters(
slurm_partition=partition,
slurm_mem_per_gpu='55G',
timeout_min=timeout,
nodes=nodes,
tasks_per_node=tasks_per_node,
cpus_per_task=12,
gpus_per_node=tasks_per_node)
if exclude_nodes is not None:
executor.update_parameters(slurm_exclude=exclude_nodes)
jobs, trainers = [], []
with executor.batch():
for ae in args_for_evals:
fb_trainer = Trainer(ae)
job = executor.submit(fb_trainer,)
trainers.append(fb_trainer)
jobs.append(job)
for job in jobs:
logger.info(f'Launched eval job with id {job.job_id}')
def launch_evals():
# ---------------------------------------------------------------------- #
# 1. Put config file names in a list
# ---------------------------------------------------------------------- #
config_fnames = [args.fname]
# -- If batch-launch is True, then the args.fname yaml file is not a
# -- config, but actually specifies a list of other config files
# -- to run in a slurm job array
if args.batch_launch:
with open(args.fname, 'r') as y_file:
config_fnames = yaml.load(y_file, Loader=yaml.FullLoader)
# ---------------------------------------------------------------------- #
# ---------------------------------------------------------------------- #
# 2. Parse each yaml config file as a dict and place in list
# ---------------------------------------------------------------------- #
nodes, tasks_per_node = None, None
configs = []
for f in config_fnames:
with open(f, 'r') as y_file:
_params = yaml.load(y_file, Loader=yaml.FullLoader)
nodes = int(_params.get('nodes'))
tasks_per_node = int(_params.get('tasks_per_node'))
configs += [_params]
logger.info(f'Loaded {len(configs)} config files')
logger.info(f'Running all jobs with {nodes=} / {tasks_per_node=}')
# ---------------------------------------------------------------------- #
# ---------------------------------------------------------------------- #
# 3. Launch evals with parsed config files
# ---------------------------------------------------------------------- #
launch_evals_with_parsed_args(
args_for_evals=configs,
submitit_folder=args.folder,
partition=args.partition,
timeout=args.time,
nodes=nodes,
tasks_per_node=tasks_per_node,
exclude_nodes=args.exclude)
# ---------------------------------------------------------------------- # | null |
161,987 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torchvision.transforms as transforms
from torch.nn.parallel import DistributedDataParallel
from timm.data import create_transform as timm_make_transforms
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_GLOBAL_SEED = 0
torch.manual_seed(_GLOBAL_SEED)
torch.backends.cudnn.benchmark = True
class AllReduce(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
if (
dist.is_available()
and dist.is_initialized()
and (dist.get_world_size() > 1)
):
x = x.contiguous() / dist.get_world_size()
dist.all_reduce(x)
return x
    @staticmethod
    def backward(ctx, grads):
return grads
class AverageMeter(object):
"""computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.max = float('-inf')
self.min = float('inf')
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
try:
self.max = max(val, self.max)
self.min = min(val, self.min)
except Exception:
pass
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def run_one_epoch(
device,
training,
encoder,
classifier,
scaler,
optimizer,
scheduler,
wd_scheduler,
data_loader,
use_bfloat16,
):
classifier.train(mode=training)
criterion = torch.nn.CrossEntropyLoss()
top1_meter = AverageMeter()
for itr, data in enumerate(data_loader):
if training:
scheduler.step()
wd_scheduler.step()
with torch.cuda.amp.autocast(dtype=torch.float16, enabled=use_bfloat16):
imgs, labels = data[0].to(device), data[1].to(device)
with torch.no_grad():
outputs = encoder(imgs)
if not training:
outputs = classifier(outputs)
if training:
outputs = classifier(outputs)
loss = criterion(outputs, labels)
top1_acc = 100. * outputs.max(dim=1).indices.eq(labels).sum() / len(imgs)
top1_acc = float(AllReduce.apply(top1_acc))
top1_meter.update(top1_acc)
if training:
if use_bfloat16:
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(classifier.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(classifier.parameters(), 1.0)
optimizer.step()
optimizer.zero_grad()
if itr % 20 == 0:
logger.info('[%5d] %.3f%% (loss: %.3f) [mem: %.2e]'
% (itr, top1_meter.avg, loss,
torch.cuda.max_memory_allocated() / 1024.**2))
return top1_meter.avg | null |
161,988 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torchvision.transforms as transforms
from torch.nn.parallel import DistributedDataParallel
from timm.data import create_transform as timm_make_transforms
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_GLOBAL_SEED = 0
torch.manual_seed(_GLOBAL_SEED)
torch.backends.cudnn.benchmark = True
def load_checkpoint(
device,
r_path,
classifier,
opt,
scaler
):
try:
checkpoint = torch.load(r_path, map_location=torch.device('cpu'))
epoch = checkpoint['epoch']
# -- loading encoder
pretrained_dict = checkpoint['classifier']
msg = classifier.load_state_dict(pretrained_dict)
logger.info(f'loaded pretrained classifier from epoch {epoch} with msg: {msg}')
# -- loading optimizer
opt.load_state_dict(checkpoint['opt'])
if scaler is not None:
scaler.load_state_dict(checkpoint['scaler'])
logger.info(f'loaded optimizers from epoch {epoch}')
logger.info(f'read-path: {r_path}')
del checkpoint
except Exception as e:
logger.info(f'Encountered exception when loading checkpoint {e}')
epoch = 0
return classifier, opt, scaler, epoch | null |
161,989 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torchvision.transforms as transforms
from torch.nn.parallel import DistributedDataParallel
from timm.data import create_transform as timm_make_transforms
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def init_data(
batch_size,
transform=None,
shared_transform=None,
data='ImageNet',
collator=None,
pin_mem=True,
num_workers=8,
world_size=1,
rank=0,
root_path=None,
image_folder=None,
training=True,
copy_data=False,
drop_last=True,
tokenize_txt=True,
subset_file=None,
clip_len=8,
frame_sample_rate=2,
duration=None,
num_clips=1,
random_clip_sampling=True,
allow_clip_overlap=False,
filter_short_videos=False,
filter_long_videos=int(1e9),
decode_one_clip=True,
datasets_weights=None,
persistent_workers=False,
repeat_wds=False,
ipe=300,
log_dir=None,
):
if (data.lower() == 'imagenet') \
or (data.lower() == 'inat21') \
or (data.lower() == 'places205'):
from src.datasets.image_dataset import make_imagedataset
dataset, data_loader, dist_sampler = make_imagedataset(
transform=transform,
batch_size=batch_size,
collator=collator,
pin_mem=pin_mem,
training=training,
num_workers=num_workers,
world_size=world_size,
rank=rank,
root_path=root_path,
image_folder=image_folder,
persistent_workers=persistent_workers,
copy_data=copy_data,
drop_last=drop_last,
subset_file=subset_file)
elif data.lower() == 'videodataset':
from src.datasets.video_dataset import make_videodataset
dataset, data_loader, dist_sampler = make_videodataset(
data_paths=root_path,
batch_size=batch_size,
frames_per_clip=clip_len,
frame_step=frame_sample_rate,
duration=duration,
num_clips=num_clips,
random_clip_sampling=random_clip_sampling,
allow_clip_overlap=allow_clip_overlap,
filter_short_videos=filter_short_videos,
filter_long_videos=filter_long_videos,
shared_transform=shared_transform,
transform=transform,
datasets_weights=datasets_weights,
collator=collator,
num_workers=num_workers,
world_size=world_size,
rank=rank,
drop_last=drop_last,
log_dir=log_dir)
return (data_loader, dist_sampler)
def make_dataloader(
dataset_name,
root_path,
image_folder,
batch_size,
world_size,
rank,
resolution=224,
training=False,
subset_file=None
):
normalization = ((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))
if training:
        logger.info('implementing auto-augment strategy')
transform = timm_make_transforms(
input_size=resolution,
is_training=training,
auto_augment='original',
interpolation='bicubic',
re_prob=0.25,
re_mode='pixel',
re_count=1,
mean=normalization[0],
std=normalization[1])
else:
transform = transforms.Compose([
transforms.Resize(size=int(resolution * 256/224)),
transforms.CenterCrop(size=resolution),
transforms.ToTensor(),
transforms.Normalize(normalization[0], normalization[1])])
data_loader, _ = init_data(
data=dataset_name,
transform=transform,
batch_size=batch_size,
world_size=world_size,
rank=rank,
root_path=root_path,
image_folder=image_folder,
training=training,
copy_data=False,
drop_last=False,
subset_file=subset_file)
return data_loader | null |
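The eval branch above resolves to a fixed resize-crop-normalize pipeline; a standalone shape check at resolution 224:

import torchvision.transforms as transforms
from PIL import Image

transform = transforms.Compose([
    transforms.Resize(size=int(224 * 256 / 224)),  # short side -> 256
    transforms.CenterCrop(size=224),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
print(transform(Image.new('RGB', (320, 240))).shape)  # torch.Size([3, 224, 224])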
161,990 | import os
import logging
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torchvision.transforms as transforms
from torch.nn.parallel import DistributedDataParallel
from timm.data import create_transform as timm_make_transforms
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
def load_pretrained(
encoder,
pretrained,
checkpoint_key='target_encoder'
):
logger.info(f'Loading pretrained model from {pretrained}')
checkpoint = torch.load(pretrained, map_location='cpu')
try:
pretrained_dict = checkpoint[checkpoint_key]
except Exception:
pretrained_dict = checkpoint['encoder']
pretrained_dict = {k.replace('module.', ''): v for k, v in pretrained_dict.items()}
pretrained_dict = {k.replace('backbone.', ''): v for k, v in pretrained_dict.items()}
for k, v in encoder.state_dict().items():
if k not in pretrained_dict:
logger.info(f'key "{k}" could not be found in loaded state dict')
elif pretrained_dict[k].shape != v.shape:
logger.info(f'key "{k}" is of different shape in model and loaded state dict')
pretrained_dict[k] = v
msg = encoder.load_state_dict(pretrained_dict, strict=False)
print(encoder)
logger.info(f'loaded pretrained model with msg: {msg}')
logger.info(f'loaded pretrained encoder from epoch: {checkpoint["epoch"]}\n path: {pretrained}')
del checkpoint
return encoder
def init_model(
device,
pretrained,
model_name,
patch_size=16,
crop_size=224,
# Video specific parameters
frames_per_clip=16,
tubelet_size=2,
use_sdpa=False,
use_SiLU=False,
tight_SiLU=True,
uniform_power=False,
checkpoint_key='target_encoder'
):
encoder = vit.__dict__[model_name](
img_size=crop_size,
patch_size=patch_size,
num_frames=frames_per_clip,
tubelet_size=tubelet_size,
uniform_power=uniform_power,
use_sdpa=use_sdpa,
use_SiLU=use_SiLU,
tight_SiLU=tight_SiLU,
)
if frames_per_clip > 1:
def forward_prehook(module, input):
input = input[0] # [B, C, H, W]
input = input.unsqueeze(2).repeat(1, 1, frames_per_clip, 1, 1)
return (input)
encoder.register_forward_pre_hook(forward_prehook)
encoder.to(device)
encoder = load_pretrained(encoder=encoder, pretrained=pretrained, checkpoint_key=checkpoint_key)
return encoder | null |
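The forward pre-hook above turns an image batch into a static clip by repeating frames along a new time axis; a standalone sketch with an Identity stand-in for the encoder:

import torch
import torch.nn as nn

frames_per_clip = 16
encoder = nn.Identity()                      # stand-in for the ViT encoder
def forward_prehook(module, args):
    x = args[0]                              # [B, C, H, W]
    return x.unsqueeze(2).repeat(1, 1, frames_per_clip, 1, 1)
encoder.register_forward_pre_hook(forward_prehook)
print(encoder(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 3, 16, 224, 224])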
161,991 | import os
import logging
import math
import pprint
import numpy as np
import torch
import torch.multiprocessing as mp
import torchvision.transforms as transforms
from torch.nn.parallel import DistributedDataParallel
from timm.data import create_transform as timm_make_transforms
import src.models.vision_transformer as vit
from src.models.attentive_pooler import AttentiveClassifier
from src.datasets.data_manager import (
init_data,
)
from src.utils.distributed import (
init_distributed,
AllReduce
)
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule,
)
from src.utils.logging import (
AverageMeter,
CSVLogger
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_GLOBAL_SEED = 0
torch.manual_seed(_GLOBAL_SEED)
torch.backends.cudnn.benchmark = True
class WarmupCosineSchedule(object):
def __init__(
self,
optimizer,
warmup_steps,
start_lr,
ref_lr,
T_max,
last_epoch=-1,
final_lr=0.
):
self.optimizer = optimizer
self.start_lr = start_lr
self.ref_lr = ref_lr
self.final_lr = final_lr
self.warmup_steps = warmup_steps
self.T_max = T_max - warmup_steps
self._step = 0.
def step(self):
self._step += 1
if self._step < self.warmup_steps:
progress = float(self._step) / float(max(1, self.warmup_steps))
new_lr = self.start_lr + progress * (self.ref_lr - self.start_lr)
else:
# -- progress after warmup
progress = float(self._step - self.warmup_steps) / float(max(1, self.T_max))
new_lr = max(self.final_lr,
self.final_lr + (self.ref_lr - self.final_lr) * 0.5 * (1. + math.cos(math.pi * progress)))
for group in self.optimizer.param_groups:
group['lr'] = new_lr
return new_lr
class CosineWDSchedule(object):
def __init__(
self,
optimizer,
ref_wd,
T_max,
final_wd=0.
):
self.optimizer = optimizer
self.ref_wd = ref_wd
self.final_wd = final_wd
self.T_max = T_max
self._step = 0.
def step(self):
self._step += 1
progress = self._step / self.T_max
new_wd = self.final_wd + (self.ref_wd - self.final_wd) * 0.5 * (1. + math.cos(math.pi * progress))
if self.final_wd <= self.ref_wd:
new_wd = max(self.final_wd, new_wd)
else:
new_wd = min(self.final_wd, new_wd)
for group in self.optimizer.param_groups:
if ('WD_exclude' not in group) or not group['WD_exclude']:
group['weight_decay'] = new_wd
return new_wd
def init_opt(
classifier,
iterations_per_epoch,
start_lr,
ref_lr,
warmup,
num_epochs,
wd=1e-6,
final_wd=1e-6,
final_lr=0.0,
use_bfloat16=False
):
param_groups = [
{
'params': (p for n, p in classifier.named_parameters()
if ('bias' not in n) and (len(p.shape) != 1))
}, {
'params': (p for n, p in classifier.named_parameters()
if ('bias' in n) or (len(p.shape) == 1)),
'WD_exclude': True,
'weight_decay': 0
}
]
logger.info('Using AdamW')
optimizer = torch.optim.AdamW(param_groups)
scheduler = WarmupCosineSchedule(
optimizer,
warmup_steps=int(warmup*iterations_per_epoch),
start_lr=start_lr,
ref_lr=ref_lr,
final_lr=final_lr,
T_max=int(num_epochs*iterations_per_epoch))
wd_scheduler = CosineWDSchedule(
optimizer,
ref_wd=wd,
final_wd=final_wd,
T_max=int(num_epochs*iterations_per_epoch))
scaler = torch.cuda.amp.GradScaler() if use_bfloat16 else None
return optimizer, scaler, scheduler, wd_scheduler | null |
161,992 | import argparse
import multiprocessing as mp
import pprint
import yaml
from app.scaffold import main as app_main
from src.utils.distributed import init_distributed
def init_distributed(port=37123, rank_and_world_size=(None, None)):
if dist.is_available() and dist.is_initialized():
return dist.get_world_size(), dist.get_rank()
rank, world_size = rank_and_world_size
os.environ['MASTER_ADDR'] = 'localhost'
if (rank is None) or (world_size is None):
try:
world_size = int(os.environ['SLURM_NTASKS'])
rank = int(os.environ['SLURM_PROCID'])
os.environ['MASTER_ADDR'] = os.environ['HOSTNAME']
except Exception:
logger.info('SLURM vars not set (distributed training not available)')
world_size, rank = 1, 0
return world_size, rank
try:
os.environ['MASTER_PORT'] = str(port)
torch.distributed.init_process_group(
backend='nccl',
world_size=world_size,
rank=rank
)
except Exception as e:
world_size, rank = 1, 0
logger.info(f'Rank: {rank}. Distributed training not available {e}')
return world_size, rank
def get_logger(name=None, force=False):
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=LOG_FORMAT, datefmt=DATE_FORMAT, force=force)
return logging.getLogger(name=name)
def process_main(rank, fname, world_size, devices):
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(devices[rank].split(':')[-1])
import logging
from src.utils.logging import get_logger
logger = get_logger(force=True)
if rank == 0:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.ERROR)
logger.info(f'called-params {fname}')
# Load config
params = None
with open(fname, 'r') as y_file:
params = yaml.load(y_file, Loader=yaml.FullLoader)
logger.info('loaded params...')
# Log config
if rank == 0:
pprint.PrettyPrinter(indent=4).pprint(params)
dump = os.path.join(params['logging']['folder'], 'params-pretrain.yaml')
with open(dump, 'w') as f:
yaml.dump(params, f)
# Init distributed (access to comm between GPUS on same machine)
world_size, rank = init_distributed(rank_and_world_size=(rank, world_size))
logger.info(f'Running... (rank: {rank}/{world_size})')
# Launch the app with loaded config
app_main(params['app'], args=params) | null |
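A hedged sketch of how process_main might be launched locally with one process per GPU; the flag names are assumptions, not taken from the original argument parser:
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--fname', type=str, default='configs/pretrain.yaml')  # assumed flag
    parser.add_argument('--devices', type=str, nargs='+', default=['cuda:0'])  # assumed flag
    args = parser.parse_args()
    num_gpus = len(args.devices)
    mp.set_start_method('spawn')
    processes = []
    for rank in range(num_gpus):
        p = mp.Process(target=process_main, args=(rank, args.fname, num_gpus, args.devices))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()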
161,993 | import argparse
import os
import pprint
import yaml
import submitit
from app.scaffold import main as app_main
from src.utils.logging import get_logger
logger = get_logger(force=True)
def launch_app_with_parsed_args(
args_for_pretrain,
submitit_folder,
partition,
timeout=4300,
nodes=1,
tasks_per_node=1,
exclude_nodes=None
):
    # Body elided in this record: in the original launcher this function
    # submits the parsed configs as a job via submitit.
    ...


def launch(args):
# ---------------------------------------------------------------------- #
# 1. Put config file names in a list
# ---------------------------------------------------------------------- #
config_fnames = [args.fname]
# -- If batch-launch is True, then the args.fname yaml file is not a
# -- config, but actually specifies a list of other config files
# -- to run in a slurm job array
if args.batch_launch:
with open(args.fname, 'r') as y_file:
config_fnames = yaml.load(y_file, Loader=yaml.FullLoader)
# ---------------------------------------------------------------------- #
# ---------------------------------------------------------------------- #
# 2. Parse each yaml config file as a dict and place in list
# ---------------------------------------------------------------------- #
nodes, tasks_per_node = None, None
configs = []
for f in config_fnames:
with open(f, 'r') as y_file:
_params = yaml.load(y_file, Loader=yaml.FullLoader)
nodes = int(_params.get('nodes'))
tasks_per_node = int(_params.get('tasks_per_node'))
configs += [_params]
logger.info(f'Loaded {len(configs)} config files')
logger.info(f'Running all jobs with {nodes=} / {tasks_per_node=}')
# ---------------------------------------------------------------------- #
# ---------------------------------------------------------------------- #
# 3. Launch evals with parsed config files
# ---------------------------------------------------------------------- #
launch_app_with_parsed_args(
args_for_pretrain=configs,
submitit_folder=args.folder,
partition=args.partition,
timeout=args.time,
nodes=nodes,
tasks_per_node=tasks_per_node,
exclude_nodes=args.exclude)
# ---------------------------------------------------------------------- # | null |
161,994 | import torch
import torchvision.transforms as transforms
import src.datasets.utils.video.transforms as video_transforms
from src.datasets.utils.video.randerase import RandomErasing
class VideoTransform(object):
def __init__(
self,
random_horizontal_flip=True,
random_resize_aspect_ratio=(3/4, 4/3),
random_resize_scale=(0.3, 1.0),
reprob=0.0,
auto_augment=False,
motion_shift=False,
crop_size=224,
normalize=((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))
):
self.random_horizontal_flip = random_horizontal_flip
self.random_resize_aspect_ratio = random_resize_aspect_ratio
self.random_resize_scale = random_resize_scale
self.auto_augment = auto_augment
self.motion_shift = motion_shift
self.crop_size = crop_size
self.mean = torch.tensor(normalize[0], dtype=torch.float32)
self.std = torch.tensor(normalize[1], dtype=torch.float32)
if not self.auto_augment:
            # Without auto-augment the frames stay in the [0, 255] range,
            # so pre-scale the normalization statistics by 255.
self.mean *= 255.
self.std *= 255.
self.autoaug_transform = video_transforms.create_random_augment(
input_size=(crop_size, crop_size),
auto_augment='rand-m7-n4-mstd0.5-inc1',
interpolation='bicubic',
)
self.spatial_transform = video_transforms.random_resized_crop_with_shift \
if motion_shift else video_transforms.random_resized_crop
self.reprob = reprob
self.erase_transform = RandomErasing(
reprob,
mode='pixel',
max_count=1,
num_splits=1,
device='cpu',
)
def __call__(self, buffer):
if self.auto_augment:
buffer = [transforms.ToPILImage()(frame) for frame in buffer]
buffer = self.autoaug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
        else:
            buffer = torch.tensor(buffer, dtype=torch.float32)
        buffer = buffer.permute(3, 0, 1, 2)  # T H W C -> C T H W (both branches)
buffer = self.spatial_transform(
images=buffer,
target_height=self.crop_size,
target_width=self.crop_size,
scale=self.random_resize_scale,
ratio=self.random_resize_aspect_ratio,
)
if self.random_horizontal_flip:
buffer, _ = video_transforms.horizontal_flip(0.5, buffer)
buffer = _tensor_normalize_inplace(buffer, self.mean, self.std)
if self.reprob > 0:
buffer = buffer.permute(1, 0, 2, 3)
buffer = self.erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def make_transforms(
random_horizontal_flip=True,
random_resize_aspect_ratio=(3/4, 4/3),
random_resize_scale=(0.3, 1.0),
reprob=0.0,
auto_augment=False,
motion_shift=False,
crop_size=224,
normalize=((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))
):
_frames_augmentation = VideoTransform(
random_horizontal_flip=random_horizontal_flip,
random_resize_aspect_ratio=random_resize_aspect_ratio,
random_resize_scale=random_resize_scale,
reprob=reprob,
auto_augment=auto_augment,
motion_shift=motion_shift,
crop_size=crop_size,
normalize=normalize,
)
return _frames_augmentation | null |
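A quick sketch applying the transform to a synthetic clip; it assumes the video decoder yields frames as a uint8 array in T H W C layout, which is what __call__ expects on the non-auto-augment path:
import numpy as np
transform = make_transforms(crop_size=224)
clip = np.random.randint(0, 256, size=(16, 256, 320, 3), dtype=np.uint8)  # 16 RGB frames
out = transform(clip)
print(out.shape)  # expected: torch.Size([3, 16, 224, 224]), i.e. C T H W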
161,995 | import torch
import torchvision.transforms as transforms
import src.datasets.utils.video.transforms as video_transforms
from src.datasets.utils.video.randerase import RandomErasing
The provided code snippet includes necessary dependencies for implementing the `tensor_normalize` function. Write a Python function `def tensor_normalize(tensor, mean, std)` to solve the following problem:
Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide.
Here is the function:
def tensor_normalize(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor - mean
tensor = tensor / std
return tensor | Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide. |
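A short usage check on synthetic uint8 frames (ImageNet statistics; the mean/std broadcast over the trailing channel dimension):
frames = torch.randint(0, 256, (8, 224, 224, 3), dtype=torch.uint8)
normed = tensor_normalize(frames, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
print(normed.shape, float(normed.mean()))  # roughly zero-centered after scaling to [0, 1]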
161,996 | import torch
import torchvision.transforms as transforms
import src.datasets.utils.video.transforms as video_transforms
from src.datasets.utils.video.randerase import RandomErasing
The provided code snippet includes necessary dependencies for implementing the `_tensor_normalize_inplace` function. Write a Python function `def _tensor_normalize_inplace(tensor, mean, std)` to solve the following problem:
Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize (with dimensions C, T, H, W). mean (tensor): mean value to subtract (in 0 to 255 floats). std (tensor): std to divide (in 0 to 255 floats).
Here is the function:
def _tensor_normalize_inplace(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize (with dimensions C, T, H, W).
mean (tensor): mean value to subtract (in 0 to 255 floats).
std (tensor): std to divide (in 0 to 255 floats).
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
C, T, H, W = tensor.shape
tensor = tensor.view(C, -1).permute(1, 0) # Make C the last dimension
tensor.sub_(mean).div_(std)
tensor = tensor.permute(1, 0).view(C, T, H, W) # Put C back in front
return tensor | Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize (with dimensions C, T, H, W). mean (tensor): mean value to subtract (in 0 to 255 floats). std (tensor): std to divide (in 0 to 255 floats). |
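A sanity sketch: with mean/std pre-scaled by 255 (as VideoTransform does), the in-place variant should agree with tensor_normalize from the previous record on the same uint8 clip:
clip = torch.randint(0, 256, (3, 16, 32, 32), dtype=torch.uint8)  # C T H W
mean = torch.tensor([0.485, 0.456, 0.406]) * 255.
std = torch.tensor([0.229, 0.224, 0.225]) * 255.
a = _tensor_normalize_inplace(clip.clone(), mean, std)
b = tensor_normalize(clip.permute(1, 2, 3, 0),  # T H W C for the out-of-place variant
                     [0.485, 0.456, 0.406],
                     [0.229, 0.224, 0.225]).permute(3, 0, 1, 2)
print(torch.allclose(a, b, atol=1e-5))  # expected: True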
161,997 | import logging
import sys
import warnings
import yaml
import torch
import src.models.vision_transformer as video_vit
import src.models.predictor as vit_pred
from src.models.utils.multimask import MultiMaskWrapper, PredictorMultiMaskWrapper
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule)
from src.utils.tensors import trunc_normal_
logger = logging.getLogger()
def load_checkpoint(
r_path,
encoder,
predictor,
target_encoder,
opt,
scaler,
):
try:
checkpoint = torch.load(r_path, map_location=torch.device('cpu'))
    except Exception as e:
        logger.info(f'Encountered exception when loading checkpoint {e}')
        # Nothing to restore if the checkpoint could not be read; resume from epoch 0.
        return encoder, predictor, target_encoder, opt, scaler, 0
    epoch = 0
try:
epoch = checkpoint['epoch']
# -- loading encoder
pretrained_dict = checkpoint['encoder']
msg = encoder.load_state_dict(pretrained_dict)
logger.info(f'loaded pretrained encoder from epoch {epoch} with msg: {msg}')
# -- loading predictor
pretrained_dict = checkpoint['predictor']
msg = predictor.load_state_dict(pretrained_dict)
logger.info(f'loaded pretrained predictor from epoch {epoch} with msg: {msg}')
# -- loading target_encoder
if target_encoder is not None:
            logger.debug(f'checkpoint keys: {list(checkpoint.keys())}')
pretrained_dict = checkpoint['target_encoder']
msg = target_encoder.load_state_dict(pretrained_dict)
logger.info(
f'loaded pretrained target encoder from epoch {epoch} with msg: {msg}'
)
# -- loading optimizer
opt.load_state_dict(checkpoint['opt'])
if scaler is not None:
scaler.load_state_dict(checkpoint['scaler'])
logger.info(f'loaded optimizers from epoch {epoch}')
logger.info(f'read-path: {r_path}')
del checkpoint
except Exception as e:
logger.info(f'Encountered exception when loading checkpoint {e}')
epoch = 0
return (
encoder,
predictor,
target_encoder,
opt,
scaler,
epoch,
) | null |
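A hedged resume sketch showing the intended call pattern; latest_path and the model/optimizer objects are placeholders assumed to exist:
import os
start_epoch = 0
if os.path.exists(latest_path):  # placeholder checkpoint path
    encoder, predictor, target_encoder, opt, scaler, start_epoch = load_checkpoint(
        r_path=latest_path,
        encoder=encoder, predictor=predictor, target_encoder=target_encoder,
        opt=opt, scaler=scaler)
for epoch in range(start_epoch, num_epochs):  # placeholder training loop
    ...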
161,998 | import logging
import sys
import warnings
import yaml
import torch
import torch.nn as nn
import src.models.vision_transformer as video_vit
import src.models.predictor as vit_pred
from src.models.utils.multimask import MultiMaskWrapper, PredictorMultiMaskWrapper
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule)
from src.utils.tensors import trunc_normal_
logger = logging.getLogger()
class MultiMaskWrapper(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
def forward(self, x, masks=None):
if masks is None:
return self.backbone(x)
if (masks is not None) and not isinstance(masks, list):
masks = [masks]
outs = []
for m in masks:
outs += [self.backbone(x, masks=m)]
return outs
class PredictorMultiMaskWrapper(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
def forward(self, ctxt, tgt, masks_ctxt, masks_tgt):
if type(ctxt) is not list:
ctxt = [ctxt]
if type(tgt) is not list:
tgt = [tgt]
if type(masks_ctxt) is not list:
masks_ctxt = [masks_ctxt]
if type(masks_tgt) is not list:
masks_tgt = [masks_tgt]
outs = []
for i, (zi, hi, mc, mt) in enumerate(zip(ctxt, tgt, masks_ctxt, masks_tgt)):
outs += [self.backbone(zi, hi, mc, mt, mask_index=i)]
return outs
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def init_video_model(
device,
patch_size=16,
num_frames=16,
tubelet_size=2,
model_name='vit_base',
crop_size=224,
pred_depth=6,
pred_embed_dim=384,
uniform_power=False,
use_mask_tokens=False,
num_mask_tokens=2,
zero_init_mask_tokens=True,
use_sdpa=False,
):
encoder = video_vit.__dict__[model_name](
img_size=crop_size,
patch_size=patch_size,
num_frames=num_frames,
tubelet_size=tubelet_size,
uniform_power=uniform_power,
use_sdpa=use_sdpa,
)
encoder = MultiMaskWrapper(encoder)
predictor = vit_pred.__dict__['vit_predictor'](
img_size=crop_size,
use_mask_tokens=use_mask_tokens,
patch_size=patch_size,
num_frames=num_frames,
tubelet_size=tubelet_size,
embed_dim=encoder.backbone.embed_dim,
predictor_embed_dim=pred_embed_dim,
depth=pred_depth,
num_heads=encoder.backbone.num_heads,
uniform_power=uniform_power,
num_mask_tokens=num_mask_tokens,
zero_init_mask_tokens=zero_init_mask_tokens,
use_sdpa=use_sdpa,
)
predictor = PredictorMultiMaskWrapper(predictor)
def init_weights(m):
if isinstance(m, torch.nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.LayerNorm):
torch.nn.init.constant_(m.bias, 0)
torch.nn.init.constant_(m.weight, 1.0)
for m in encoder.modules():
init_weights(m)
for m in predictor.modules():
init_weights(m)
encoder.to(device)
predictor.to(device)
logger.info(encoder)
logger.info(predictor)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f'Encoder number of parameters: {count_parameters(encoder)}')
logger.info(f'Predictor number of parameters: {count_parameters(predictor)}')
return encoder, predictor | null |
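A sketch of constructing the wrapped encoder/predictor pair; the values mirror the function's own defaults and are illustrative rather than taken from a specific config:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
encoder, predictor = init_video_model(
    device=device,
    model_name='vit_base',
    crop_size=224, patch_size=16,
    num_frames=16, tubelet_size=2,
    pred_depth=6, pred_embed_dim=384)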
161,999 | import logging
import sys
import warnings
import yaml
import math
import torch
import src.models.vision_transformer as video_vit
import src.models.predictor as vit_pred
from src.models.utils.multimask import MultiMaskWrapper, PredictorMultiMaskWrapper
from src.utils.schedulers import (
WarmupCosineSchedule,
CosineWDSchedule)
from src.utils.tensors import trunc_normal_
logger = logging.getLogger()
class WarmupCosineSchedule(object):
def __init__(
self,
optimizer,
warmup_steps,
start_lr,
ref_lr,
T_max,
last_epoch=-1,
final_lr=0.
):
self.optimizer = optimizer
self.start_lr = start_lr
self.ref_lr = ref_lr
self.final_lr = final_lr
self.warmup_steps = warmup_steps
self.T_max = T_max - warmup_steps
self._step = 0.
def step(self):
self._step += 1
if self._step < self.warmup_steps:
progress = float(self._step) / float(max(1, self.warmup_steps))
new_lr = self.start_lr + progress * (self.ref_lr - self.start_lr)
else:
# -- progress after warmup
progress = float(self._step - self.warmup_steps) / float(max(1, self.T_max))
new_lr = max(self.final_lr,
self.final_lr + (self.ref_lr - self.final_lr) * 0.5 * (1. + math.cos(math.pi * progress)))
for group in self.optimizer.param_groups:
group['lr'] = new_lr
return new_lr
class CosineWDSchedule(object):
def __init__(
self,
optimizer,
ref_wd,
T_max,
final_wd=0.
):
self.optimizer = optimizer
self.ref_wd = ref_wd
self.final_wd = final_wd
self.T_max = T_max
self._step = 0.
def step(self):
self._step += 1
progress = self._step / self.T_max
new_wd = self.final_wd + (self.ref_wd - self.final_wd) * 0.5 * (1. + math.cos(math.pi * progress))
if self.final_wd <= self.ref_wd:
new_wd = max(self.final_wd, new_wd)
else:
new_wd = min(self.final_wd, new_wd)
for group in self.optimizer.param_groups:
if ('WD_exclude' not in group) or not group['WD_exclude']:
group['weight_decay'] = new_wd
return new_wd
def init_opt(
encoder,
predictor,
iterations_per_epoch,
start_lr,
ref_lr,
warmup,
num_epochs,
wd=1e-6,
final_wd=1e-6,
final_lr=0.0,
mixed_precision=False,
ipe_scale=1.25,
betas=(0.9, 0.999),
eps=1e-8,
zero_init_bias_wd=True,
):
param_groups = [
{
'params': (p for n, p in encoder.named_parameters()
if ('bias' not in n) and (len(p.shape) != 1))
}, {
'params': (p for n, p in predictor.named_parameters()
if ('bias' not in n) and (len(p.shape) != 1))
}, {
'params': (p for n, p in encoder.named_parameters()
if ('bias' in n) or (len(p.shape) == 1)),
'WD_exclude': zero_init_bias_wd,
'weight_decay': 0,
}, {
'params': (p for n, p in predictor.named_parameters()
if ('bias' in n) or (len(p.shape) == 1)),
'WD_exclude': zero_init_bias_wd,
'weight_decay': 0,
},
]
logger.info('Using AdamW')
optimizer = torch.optim.AdamW(param_groups, betas=betas, eps=eps)
scheduler = WarmupCosineSchedule(
optimizer,
warmup_steps=int(warmup * iterations_per_epoch),
start_lr=start_lr,
ref_lr=ref_lr,
final_lr=final_lr,
T_max=int(ipe_scale * num_epochs * iterations_per_epoch),
)
wd_scheduler = CosineWDSchedule(
optimizer,
ref_wd=wd,
final_wd=final_wd,
T_max=int(ipe_scale * num_epochs * iterations_per_epoch),
)
scaler = torch.cuda.amp.GradScaler() if mixed_precision else None
return optimizer, scaler, scheduler, wd_scheduler | null |
162,000 | import pandas as pd
def get_price(code, end_date='', count=10, frequency='1d', fields=[]):  # the single externally exposed function, which keeps the API simplest for users
    xcode = code.replace('.XSHG', '').replace('.XSHE', '')  # normalize the security-code format
    xcode = 'sh'+xcode if ('XSHG' in code) else 'sz'+xcode if ('XSHE' in code) else code
    if frequency in ['1d', '1w', '1M']:  # 1d daily, 1w weekly, 1M monthly bars
        try: return get_price_sina(xcode, end_date=end_date, count=count, frequency=frequency)  # primary source
        except: return get_price_day_tx(xcode, end_date=end_date, count=count, frequency=frequency)  # fallback
    if frequency in ['1m', '5m', '15m', '30m', '60m']:  # minute bars; only the Tencent API supports 1m
        if frequency == '1m': return get_price_min_tx(xcode, end_date=end_date, count=count, frequency=frequency)
        try: return get_price_sina(xcode, end_date=end_date, count=count, frequency=frequency)  # primary source
        except: return get_price_min_tx(xcode, end_date=end_date, count=count, frequency=frequency)  # fallback
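A hedged usage sketch; it assumes the sina/tx helper functions referenced above are defined and that the machine has network access:
df_day = get_price('sh600519', count=5, frequency='1d')       # 5 daily bars
df_min = get_price('600519.XSHG', count=48, frequency='15m')  # 48 fifteen-minute bars
print(df_day.tail())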
162,001 | import numpy as np
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def RET(S, N=1): return np.array(S)[-N]  # N-th value from the end of the series; defaults to the last
162,002 | import numpy as np
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def MIN(S1, S2): return np.minimum(S1, S2)  # element-wise min of two series
162,003 | import pandas as pd
def DIFF(S, N=1):  # current value minus the value N steps earlier; the first N entries become NaN
    return pd.Series(S).diff(N)  # np.diff(S) drops the NaN instead, yielding one fewer row
162,004 | import numpy as np
import pandas as pd
def IF(S_BOOL, S_TRUE, S_FALSE):  # element-wise conditional: S_TRUE where S_BOOL is True, else S_FALSE
    return np.where(S_BOOL, S_TRUE, S_FALSE)
def SUM(S, N):  # N-day rolling sum of the series, returned as a series
    return pd.Series(S).rolling(N).sum().values
def EVERY(S_BOOL, N):  # EVERY(CLOSE>OPEN, 5): True if the condition held on each of the last N days
    R = SUM(S_BOOL, N)
    return IF(R == N, True, False)
162,005 |
def LAST(S_BOOL, A, B):  # whether the condition held continuously from A days ago through B days ago
    if A < B: A = B  # requires A > B; e.g. LAST(CLOSE>OPEN, 5, 3): bullish close every day from 5 to 3 days ago
    return S_BOOL[-A:-B].sum() == (A - B)  # returns a single boolean
162,006 | import numpy as np
import pandas as pd
def IF(S_BOOL, S_TRUE, S_FALSE):  # element-wise conditional: S_TRUE where S_BOOL is True, else S_FALSE
    return np.where(S_BOOL, S_TRUE, S_FALSE)
def SUM(S, N):  # N-day rolling sum of the series, returned as a series
    return pd.Series(S).rolling(N).sum().values
def EXIST(S_BOOL, N=5):  # EXIST(CLOSE>3010, N=5): whether the condition held on at least one of the last N days
    R = SUM(S_BOOL, N)
    return IF(R > 0, True, False)
162,007 | import numpy as np
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def BARSLAST(S_BOOL):  # number of bars since the condition was last True
    M = np.argwhere(S_BOOL)  # e.g. BARSLAST(CLOSE/REF(CLOSE)>=1.1): days since the last limit-up
    return len(S_BOOL) - int(M[-1]) - 1 if M.size > 0 else -1
162,008 | import numpy as np
import pandas as pd
def SLOPE(S, N, RS=False):  # slope of an N-period linear regression on S (returns only the slope by default, not the fitted line)
    M = pd.Series(S[-N:]); poly = np.polyfit(M.index, M.values, deg=1); Y = np.polyval(poly, M.index)
    if RS: return Y[1] - Y[0], Y
    return Y[1] - Y[0]
def SUM(S, N):  # N-day rolling sum of the series
    return pd.Series(S).rolling(N).sum().values
def COUNT(S_BOOL, N):  # COUNT(CLOSE>OPEN, N): number of days the condition was True within the last N days
    return SUM(S_BOOL, N)
def FORCAST(S, N):  # one-step-ahead forecast from an N-period linear regression on S
    K, Y = SLOPE(S, N, RS=True)
    return Y[-1] + K
162,009 | import numpy as np
def IF(S_BOOL, S_TRUE, S_FALSE):  # element-wise conditional: S_TRUE where S_BOOL is True, else S_FALSE
    return np.where(S_BOOL, S_TRUE, S_FALSE)
def CROSS(S1, S2):  # detect a cross, e.g. CROSS(MA(C,5), MA(C,10)); COUNT is the helper from the record above
    CROSS_BOOL = IF(S1 > S2, True, False)
    return COUNT(CROSS_BOOL > 0, 2) == 1  # upward cross: yesterday 0, today 1; downward cross: yesterday 1, today 0
162,010 | import numpy as np
import pandas as pd
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def EMA(S, N):  # exponential moving average; for precision use S > 4*N (EMA needs at least 120 periods)
    return pd.Series(S).ewm(span=N, adjust=False).mean().values
def MACD(CLOSE, SHORT=12, LONG=26, M=9):  # built from EMAs; with 120 days of data this matches Xueqiu to 2 decimal places
    DIF = EMA(CLOSE, SHORT) - EMA(CLOSE, LONG)
    DEA = EMA(DIF, M); MACD = (DIF - DEA) * 2
    return RD(DIF), RD(DEA), RD(MACD)
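A quick sketch on synthetic closes (per the comment above, EMA wants well over 120 bars for precision); RD is the rounding helper defined at the top of this record:
CLOSE = np.cumsum(np.random.randn(300)) + 100.0  # synthetic random-walk closes
DIF, DEA, BAR = MACD(CLOSE)
print(DIF[-1], DEA[-1], BAR[-1])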
162,011 | import pandas as pd
def HHV(S, N):  # HHV(C, 5): highest value over the last N days
    return pd.Series(S).rolling(N).max().values
def LLV(S, N):  # LLV(C, 5): lowest value over the last N days
    return pd.Series(S).rolling(N).min().values
def EMA(S, N):  # exponential moving average; needs at least 120 periods for precision
    return pd.Series(S).ewm(span=N, adjust=False).mean().values
def KDJ(CLOSE, HIGH, LOW, N=9, M1=3, M2=3):  # KDJ indicator
    RSV = (CLOSE - LLV(LOW, N)) / (HHV(HIGH, N) - LLV(LOW, N)) * 100
    K = EMA(RSV, (M1*2-1)); D = EMA(K, (M2*2-1)); J = K*3 - D*2
    return K, D, J
162,012 | import numpy as np
import pandas as pd
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def ABS(S): return np.abs(S)  # element-wise absolute value
def MAX(S1, S2): return np.maximum(S1, S2)  # element-wise max of two series
def REF(S, N=1):  # shift the whole series down by N (shift produces leading NaN)
    return pd.Series(S).shift(N).values
def SMA(S, N, M=1):  # Chinese-style SMA; needs at least 120 periods to be accurate
    K = pd.Series(S).rolling(N).mean()  # seed with the rolling mean (a loop-free version would be faster; suggestions welcome)
    for i in range(N+1, len(S)): K[i] = (M * S[i] + (N - M) * K[i-1]) / N  # starts at N+1 because K[i-1] is needed
    return K
def RSI(CLOSE, N=24):
    DIF = CLOSE - REF(CLOSE, 1)
    return RD(SMA(MAX(DIF, 0), N) / SMA(ABS(DIF), N) * 100)
162,013 | import numpy as np
import pandas as pd
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def HHV(S, N):  # HHV(C, 5): highest value over the last N days
    return pd.Series(S).rolling(N).max().values
def LLV(S, N):  # LLV(C, 5): lowest value over the last N days
    return pd.Series(S).rolling(N).min().values
def WR(CLOSE, HIGH, LOW, N=10, N1=6):  # Williams %R indicator
    WR = (HHV(HIGH, N) - CLOSE) / (HHV(HIGH, N) - LLV(LOW, N)) * 100
    WR1 = (HHV(HIGH, N1) - CLOSE) / (HHV(HIGH, N1) - LLV(LOW, N1)) * 100
    return RD(WR), RD(WR1)
162,014 | import numpy as np
import pandas as pd
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def BIAS(CLOSE, L1=6, L2=12, L3=24):  # BIAS deviation rate
    BIAS1 = (CLOSE - MA(CLOSE, L1)) / MA(CLOSE, L1) * 100
    BIAS2 = (CLOSE - MA(CLOSE, L2)) / MA(CLOSE, L2) * 100
    BIAS3 = (CLOSE - MA(CLOSE, L3)) / MA(CLOSE, L3) * 100
    return RD(BIAS1), RD(BIAS2), RD(BIAS3)
162,015 | import numpy as np
import pandas as pd
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def STD(S, N):  # N-day rolling standard deviation of the series, returned as a series
    return pd.Series(S).rolling(N).std(ddof=0).values
def BOLL(CLOSE, N=20, P=2):  # Bollinger Bands
    MID = MA(CLOSE, N)
    UPPER = MID + STD(CLOSE, N) * P
    LOWER = MID - STD(CLOSE, N) * P
    return RD(UPPER), RD(MID), RD(LOWER)
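A short sketch on synthetic closes; the first N-1 values of each band are NaN because of the rolling windows:
CLOSE = np.cumsum(np.random.randn(120)) + 50.0
UPPER, MID, LOWER = BOLL(CLOSE, N=20, P=2)
print(UPPER[-1], MID[-1], LOWER[-1])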
162,016 | import numpy as np
import pandas as pd
def RD(N, D=3): return np.round(N, D)  # round to D decimal places (default 3)
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def REF(S, N=1):  # shift the whole series down by N (shift produces leading NaN)
    return pd.Series(S).shift(N).values
def SUM(S, N):  # N-day rolling sum of the series
    return pd.Series(S).rolling(N).sum().values
def COUNT(S_BOOL, N):  # number of True days within the last N days
    return SUM(S_BOOL, N)
def PSY(CLOSE, N=12, M=6):  # PSY psychological line: share of up days over the last N days
    PSY = COUNT(CLOSE > REF(CLOSE, 1), N) / N * 100
    PSYMA = MA(PSY, M)
    return RD(PSY), RD(PSYMA)
162,017 | import numpy as np
import pandas as pd
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def AVEDEV(S, N):  # N-day mean absolute deviation of the series
    return pd.Series(S).rolling(N).apply(lambda x: (np.abs(x - x.mean())).mean()).values
def CCI(CLOSE, HIGH, LOW, N=14):  # commodity channel index
    TP = (HIGH + LOW + CLOSE) / 3
    return (TP - MA(TP, N)) / (0.015 * AVEDEV(TP, N))
162,018 | import numpy as np
import pandas as pd
def ABS(S): return np.abs(S)  # element-wise absolute value
def MAX(S1, S2): return np.maximum(S1, S2)  # element-wise max of two series
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def REF(S, N=1):  # shift the whole series down by N (shift produces leading NaN)
    return pd.Series(S).shift(N).values
def ATR(CLOSE, HIGH, LOW, N=20):  # N-day average true range
    TR = MAX(MAX((HIGH - LOW), ABS(REF(CLOSE, 1) - HIGH)), ABS(REF(CLOSE, 1) - LOW))
    return MA(TR, N)
162,019 | import pandas as pd
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def BBI(CLOSE, M1=3, M2=6, M3=12, M4=20):  # BBI bull/bear index
    return (MA(CLOSE, M1) + MA(CLOSE, M2) + MA(CLOSE, M3) + MA(CLOSE, M4)) / 4
162,020 | import numpy as np
import pandas as pd
def ABS(S): return np.abs(S)  # element-wise absolute value
def MAX(S1, S2): return np.maximum(S1, S2)  # element-wise max of two series
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def REF(S, N=1):  # shift the whole series down by N (shift produces leading NaN)
    return pd.Series(S).shift(N).values
def IF(S_BOOL, S_TRUE, S_FALSE):  # element-wise conditional: S_TRUE where S_BOOL is True, else S_FALSE
    return np.where(S_BOOL, S_TRUE, S_FALSE)
def SUM(S, N):  # N-day rolling sum of the series
    return pd.Series(S).rolling(N).sum().values
def DMI(CLOSE, HIGH, LOW, M1=14, M2=6):  # directional movement index; results match Tonghuashun (THS) and Tongdaxin (TDX) exactly
    TR = SUM(MAX(MAX(HIGH - LOW, ABS(HIGH - REF(CLOSE, 1))), ABS(LOW - REF(CLOSE, 1))), M1)
    HD = HIGH - REF(HIGH, 1); LD = REF(LOW, 1) - LOW
    DMP = SUM(IF((HD > 0) & (HD > LD), HD, 0), M1)
    DMM = SUM(IF((LD > 0) & (LD > HD), LD, 0), M1)
    PDI = DMP * 100 / TR; MDI = DMM * 100 / TR
    ADX = MA(ABS(MDI - PDI) / (PDI + MDI) * 100, M2)
    ADXR = (ADX + REF(ADX, M2)) / 2
    return PDI, MDI, ADX, ADXR
162,021 | import pandas as pd
def HHV(S, N):  # HHV(C, 5): highest value over the last N days
    return pd.Series(S).rolling(N).max().values
def LLV(S, N):  # LLV(C, 5): lowest value over the last N days
    return pd.Series(S).rolling(N).min().values
def TAQ(HIGH, LOW, N):  # Donchian channel: simplicity itself, works through bull and bear markets
    UP = HHV(HIGH, N); DOWN = LLV(LOW, N); MID = (UP + DOWN) / 2
    return UP, MID, DOWN
162,022 | import pandas as pd
def MA(S, N):  # N-day simple moving average of the series, returned as a series
    return pd.Series(S).rolling(N).mean().values
def REF(S, N=1):  # shift the whole series down by N (shift produces leading NaN)
    return pd.Series(S).shift(N).values
def EMA(S, N):  # exponential moving average; needs at least 120 periods for precision
    return pd.Series(S).ewm(span=N, adjust=False).mean().values
def TRIX(CLOSE, M1=12, M2=20):  # triple exponentially smoothed moving average
    TR = EMA(EMA(EMA(CLOSE, M1), M1), M1)
    TRIX = (TR - REF(TR, 1)) / REF(TR, 1) * 100
    TRMA = MA(TRIX, M2)
    return TRIX, TRMA
162,023 | ): #对序列整体下移动N,返回序列(shift后会产生NAN)
return pd.Series(S).shift(N).values
def IF(S_BOOL,S_TRUE,S_FALSE):
def SUM(S, N):
def VR(CLOSE,VOL,M1=26): #VR容量比率
LC = REF(CLOSE, 1)
return SUM(IF(CLOSE > LC, VOL, 0), M1) / SUM(IF(CLOSE <= LC, VOL, 0), M1) * 100 | null |